0f5bcf79d72721bdb9243a485c215d4fd740ca06
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Test whether driver flag @flag is set in the @bits bitmap. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
72
/* Set driver flag @flag in the @bits bitmap (atomic bitop). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Clear driver flag @flag in the @bits bitmap (atomic bitop). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
111
112 /* length of time before we decide the hardware is borked,
113  * and dev->tx_timeout() should be called to fix the problem
114  */
115
116 #define TG3_TX_TIMEOUT                  (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU                     60
120 #define TG3_MAX_MTU(tp) \
121         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124  * You can't change the ring sizes, but you can change where you place
125  * them in the NIC onboard memory.
126  */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING         200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
135 #define TG3_RSS_INDIR_TBL_SIZE          128
136
137 /* Do not place this n-ring entries value into the tp struct itself,
138  * we really want to expose these constants to GCC so that modulo et
139  * al.  operations are done with shifts and masks instead of with
140  * hw multiply/modulo instructions.  Another solution would be to
141  * replace things like '% foo' with '& (foo - 1)'.
142  */
143
144 #define TG3_TX_RING_SIZE                512
145 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
154                                  TG3_TX_RING_SIZE)
155 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156
157 #define TG3_DMA_BYTE_ENAB               64
158
159 #define TG3_RX_STD_DMA_SZ               1536
160 #define TG3_RX_JMB_DMA_SZ               9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174  * that are at least dword aligned when used in PCIX mode.  The driver
175  * works around this bug by double copying the packet.  This workaround
176  * is built into the normal double copy length check for efficiency.
177  *
178  * However, the double copy is only necessary on those architectures
179  * where unaligned memory accesses are inefficient.  For those architectures
180  * where unaligned memory accesses incur little penalty, we can reintegrate
181  * the 5701 in the normal rx path.  Doing so saves a device structure
182  * dereference by hardcoding the double copy threshold in place.
183  */
184 #define TG3_RX_COPY_THRESHOLD           256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
187 #else
188         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
189 #endif
190
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
193
194 #define TG3_RAW_IP_ALIGN 2
195
196 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
197
198 #define FIRMWARE_TG3            "tigon/tg3.bin"
199 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
200 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
201
/* Version banner printed once at module load time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Bitmapped NETIF_MSG_* debug enable mask (module parameter). */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus
 * SysKonnect, Altima, Apple and one Fujitsu rebrand.  The table is
 * terminated by the empty entry.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
/* ethtool -S statistics names.  NOTE(review): the order appears to
 * mirror the hardware statistics block layout, so entries must not be
 * reordered or removed independently of the code that fills them --
 * verify against the stats-gathering routine before changing.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

/* Number of ethtool statistics exported. */
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
388
389
/* ethtool self-test names; "(online)" tests run without taking the
 * interface down, "(offline)" tests require it.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

/* Number of ethtool self-tests exported. */
#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
402
403
/* Direct MMIO register write (posted; no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
408
/* Direct MMIO register read. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
413
/* Write a register in the APE (Application Processing Engine) BAR. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
418
/* Read a register from the APE (Application Processing Engine) BAR. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
423
/* Indirect register write through the PCI config-space window.
 * indirect_lock serializes the REG_BASE_ADDR/REG_DATA pair, which
 * must be written as an atomic sequence.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
433
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
439
/* Indirect register read through the PCI config-space window; see
 * tg3_write_indirect_reg32() for the locking rationale.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
451
/* Write a mailbox register via PCI config space.
 *
 * The RX return-ring consumer and std-ring producer mailboxes have
 * dedicated config-space registers and bypass the indirect window;
 * everything else goes through REG_BASE_ADDR/REG_DATA (mailbox space
 * starts at offset 0x5600), serialized by indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
481
/* Read a mailbox register via the PCI config-space indirect window
 * (mailbox space starts at offset 0x5600), serialized by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
493
494 /* usec_wait specifies the wait time in usec when writing to certain registers
495  * where it is unsafe to read back the register without some delay.
496  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
497  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
498  */
/* Register write with an optional settling delay (usec_wait), used for
 * registers that are unsafe to read back immediately (see comment
 * above).  Statement order is significant: write, delay, flush-read,
 * then delay again so the full wait is honored either way.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
517
/* Mailbox write with a flushing read-back, skipped on chips where the
 * read is unnecessary or harmful (write-reorder / ICH workarounds).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
524
/* TX mailbox write.  The value is written twice on chips with the TXD
 * mailbox hardware bug, and read back on chips that may reorder
 * mailbox writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
534
/* 5906 mailbox read: mailboxes live in the GRC mailbox region. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
539
/* 5906 mailbox write: mailboxes live in the GRC mailbox region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
544
/* Register/mailbox accessor shorthands.  These dispatch through the
 * per-chip function pointers in struct tg3 (direct, indirect, or
 * workaround variants); the _f forms flush the posted write, and
 * tw32_wait_f additionally enforces a settling delay in usec.
 * They all expect a local variable named 'tp' in scope.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
555
/* Write one 32-bit word into NIC on-chip SRAM at offset @off through
 * the memory window.  On the 5906 the stats-block region is not
 * writable, so such writes are silently dropped.  The window base must
 * always be restored to zero afterwards; indirect_lock serializes the
 * base/data sequence.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
580
/* Read one 32-bit word from NIC on-chip SRAM at offset @off into
 * @val.  On the 5906 the stats-block region is not accessible and
 * reads there return 0.  Mirrors tg3_write_mem(): the window base is
 * always restored to zero and the sequence is serialized by
 * indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
607
608 static void tg3_ape_lock_init(struct tg3 *tp)
609 {
610         int i;
611         u32 regbase, bit;
612
613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
614                 regbase = TG3_APE_LOCK_GRANT;
615         else
616                 regbase = TG3_APE_PER_LOCK_GRANT;
617
618         /* Make sure the driver hasn't any stale locks. */
619         for (i = 0; i < 8; i++) {
620                 if (i == TG3_APE_LOCK_GPIO)
621                         continue;
622                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
623         }
624
625         /* Clear the correct bit of the GPIO lock too. */
626         if (!tp->pci_fn)
627                 bit = APE_LOCK_GRANT_DRIVER;
628         else
629                 bit = 1 << tp->pci_fn;
630
631         tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
632 }
633
/* Acquire APE lock @locknum so the host and APE firmware do not race
 * on shared resources.  Returns 0 on success (or when APE support is
 * absent / the lock does not exist on this chip), -EBUSY if the lock
 * is not granted within ~1ms, -EINVAL for an unsupported lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to take. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant blocks; later chips use the
	 * per-lock blocks.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	/* The GPIO lock is requested with a per-PCI-function bit on
	 * multi-function devices.
	 */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
687
/* Release APE lock @locknum previously taken with tg3_ape_lock().
 * Silently does nothing for absent APE support or unknown locks.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock, so there is nothing to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Must release with the same bit used to request; the GPIO lock
	 * uses a per-PCI-function bit on multi-function devices.
	 */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
718
/* Disable chip interrupts: mask the PCI interrupt in MISC_HOST_CTRL,
 * then write 1 to every vector's interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
728
/* Re-enable chip interrupts: clear irq_sync, unmask the PCI interrupt,
 * and acknowledge each vector's mailbox with its last status tag.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Make the irq_sync clear visible before the mailbox writes below. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode needs the mailbox written twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
759
760 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
761 {
762         struct tg3 *tp = tnapi->tp;
763         struct tg3_hw_status *sblk = tnapi->hw_status;
764         unsigned int work_exists = 0;
765
766         /* check for phy events */
767         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
768                 if (sblk->status & SD_STATUS_LINK_CHG)
769                         work_exists = 1;
770         }
771         /* check for RX/TX work to do */
772         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
773             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
774                 work_exists = 1;
775
776         return work_exists;
777 }
778
779 /* tg3_int_reenable
780  *  similar to tg3_enable_ints, but it accurately determines whether there
781  *  is new work pending and can return without flushing the PIO write
782  *  which reenables interrupts
783  */
/* Re-enable interrupts for one NAPI vector by acknowledging the last
 * status tag.  Unlike tg3_enable_ints(), this avoids the flushing
 * read; mmiowb() orders the mailbox write instead.  See the comment
 * block above for details.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
799
/* Restore the normal core clock configuration.
 *
 * No-op on CPMU-equipped or 5780-class chips, which manage their own
 * clocking.  Otherwise the desired CLOCK_CTRL value is derived from the
 * current register contents (keeping only the CLKRUN bits and the low
 * divider field), cached in tp->pci_clock_ctrl, and programmed with
 * intermediate steps as the hardware requires.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		/* 5705+: only need to reassert the 625 MHz core clock
		 * bit if it was previously set.
		 */
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Older chips: step down through ALTCLK in two writes
		 * before the final value is programmed below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
832
833 #define PHY_BUSY_LOOPS  5000
834
/* Read PHY register @reg through the MAC's MII management interface.
 *
 * Hardware autopolling is paused for the duration of the manual MI_COM
 * transaction and restored afterwards, so the two cannot collide.
 * Returns 0 with *@val holding the 16-bit register contents on success,
 * or -EBUSY if the transaction did not complete within PHY_BUSY_LOOPS
 * polls (~50 ms worst case at 10 us per poll).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause autopolling before driving MI_COM manually. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the clause-22 read frame: PHY address, register, command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay to latch the
			 * data returned by the completed transaction.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
883
/* Write @val to PHY register @reg through the MAC's MII management
 * interface.
 *
 * FET-style PHYs have no MII_CTRL1000 or MII_TG3_AUX_CTRL registers;
 * writes to those are silently discarded and reported as success.
 * Autopolling is paused around the manual transaction and restored
 * afterwards.  Returns 0 on success, -EBUSY on transaction timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause autopolling before driving MI_COM manually. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the clause-22 write frame: address, register, data, cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
932
933 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
934 {
935         int err;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
942         if (err)
943                 goto done;
944
945         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
946                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
947         if (err)
948                 goto done;
949
950         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
951
952 done:
953         return err;
954 }
955
956 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
957 {
958         int err;
959
960         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
961         if (err)
962                 goto done;
963
964         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
965         if (err)
966                 goto done;
967
968         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
969                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
970         if (err)
971                 goto done;
972
973         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
974
975 done:
976         return err;
977 }
978
979 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
980 {
981         int err;
982
983         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
984         if (!err)
985                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
986
987         return err;
988 }
989
990 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
991 {
992         int err;
993
994         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
995         if (!err)
996                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
997
998         return err;
999 }
1000
1001 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1002 {
1003         int err;
1004
1005         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1006                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1007                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1008         if (!err)
1009                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1010
1011         return err;
1012 }
1013
1014 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1015 {
1016         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1017                 set |= MII_TG3_AUXCTL_MISC_WREN;
1018
1019         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1020 }
1021
/* Enable SMDSP access in the PHY AUXCTL shadow register (also keeps the
 * TX 6dB coding bit set, as the hardware requires).
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable SMDSP access.  Note: the expansion must NOT end in a
 * semicolon — the previous stray ';' made un-braced if/else callers
 * expand to a syntax error (and `MACRO(tp);` to a double statement).
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1030
/* Reset the PHY via BMCR_RESET and wait for the bit to self-clear.
 *
 * Polls up to 5000 times (10 us apart, ~50 ms).  Returns 0 once the
 * reset completes, -EBUSY on any PHY access failure or on timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			/* Small settle delay after reset completes. */
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1061
1062 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1063 {
1064         struct tg3 *tp = bp->priv;
1065         u32 val;
1066
1067         spin_lock_bh(&tp->lock);
1068
1069         if (tg3_readphy(tp, reg, &val))
1070                 val = -EIO;
1071
1072         spin_unlock_bh(&tp->lock);
1073
1074         return val;
1075 }
1076
1077 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1078 {
1079         struct tg3 *tp = bp->priv;
1080         u32 ret = 0;
1081
1082         spin_lock_bh(&tp->lock);
1083
1084         if (tg3_writephy(tp, reg, val))
1085                 ret = -EIO;
1086
1087         spin_unlock_bh(&tp->lock);
1088
1089         return ret;
1090 }
1091
/* phylib mii_bus ->reset hook.  The tg3 driver resets the PHY itself
 * (see tg3_bmcr_reset()), so the bus-level reset is a no-op.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1096
/* Configure the 5785 MAC's PHY interface to match the attached PHY.
 *
 * Selects the LED mode word for the recognized PHY model (returning
 * silently for unknown models), then programs MAC_PHYCFG1/2 and, for
 * RGMII PHYs, MAC_EXT_RGMII_MODE according to the RGMII in-band status
 * and external in-band RX/TX flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		/* Unknown PHY model: leave the MAC-PHY config alone. */
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII (e.g. MII) attach: minimal PHYCFG setup. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status signalling unless disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Program the external RGMII mode register to match. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1177
/* Put the MII management interface into manual (non-autopoll) mode and,
 * when the mdio bus is already registered on a 5785, reapply the
 * MAC-PHY interface configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1188
/* Determine the PHY address, and when PHYLIB is in use, allocate and
 * register the mdio bus, locate the PHY device, and apply per-model
 * interface settings and quirk flags.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when no
 * usable PHY is found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+: PHY address is derived from the PCI function,
		 * with serdes PHYs offset by 7.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			/* 5717 A0 reports serdes via a CPMU strap bit. */
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the one known PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-model interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1293
1294 static void tg3_mdio_fini(struct tg3 *tp)
1295 {
1296         if (tg3_flag(tp, MDIOBUS_INITED)) {
1297                 tg3_flag_clear(tp, MDIOBUS_INITED);
1298                 mdiobus_unregister(tp->mdio_bus);
1299                 mdiobus_free(tp->mdio_bus);
1300         }
1301 }
1302
/* tp->lock is held.
 *
 * Raise the driver-event bit in GRC_RX_CPU_EVENT to signal the firmware
 * that a command is pending, and record the time so that
 * tg3_wait_for_event_ack() can bound its wait for the ack.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1314
1315 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1316
/* tp->lock is held.
 *
 * Wait until the firmware has acknowledged (cleared) the previous
 * driver event, but never longer than TG3_FW_EVENT_TIMEOUT_USEC past
 * the time the event was raised.  The wait is skipped entirely when
 * the timeout has already elapsed, and shortened when part of it has.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert to number of 8 us poll iterations (rounding up). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware clears the driver-event bit to ack it. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1343
/* tp->lock is held.
 *
 * Report the current link/PHY state to the management firmware on
 * ASF-enabled 5780-class devices.  Packs four pairs of PHY registers
 * (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR) into the
 * firmware data mailbox — high register in the upper 16 bits of each
 * word — and raises a firmware event.  Registers that fail to read
 * contribute zero.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Make sure the previous event has been consumed. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Length value expected by the firmware for this command. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		/* 1000BASE-T registers only exist on copper PHYs. */
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1390
1391 static void tg3_link_report(struct tg3 *tp)
1392 {
1393         if (!netif_carrier_ok(tp->dev)) {
1394                 netif_info(tp, link, tp->dev, "Link is down\n");
1395                 tg3_ump_link_report(tp);
1396         } else if (netif_msg_link(tp)) {
1397                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1398                             (tp->link_config.active_speed == SPEED_1000 ?
1399                              1000 :
1400                              (tp->link_config.active_speed == SPEED_100 ?
1401                               100 : 10)),
1402                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1403                              "full" : "half"));
1404
1405                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1406                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1407                             "on" : "off",
1408                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1409                             "on" : "off");
1410
1411                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1412                         netdev_info(tp->dev, "EEE is %s\n",
1413                                     tp->setlpicnt ? "enabled" : "disabled");
1414
1415                 tg3_ump_link_report(tp);
1416         }
1417 }
1418
1419 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1420 {
1421         u16 miireg;
1422
1423         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1424                 miireg = ADVERTISE_PAUSE_CAP;
1425         else if (flow_ctrl & FLOW_CTRL_TX)
1426                 miireg = ADVERTISE_PAUSE_ASYM;
1427         else if (flow_ctrl & FLOW_CTRL_RX)
1428                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1429         else
1430                 miireg = 0;
1431
1432         return miireg;
1433 }
1434
1435 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1436 {
1437         u16 miireg;
1438
1439         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1440                 miireg = ADVERTISE_1000XPAUSE;
1441         else if (flow_ctrl & FLOW_CTRL_TX)
1442                 miireg = ADVERTISE_1000XPSE_ASYM;
1443         else if (flow_ctrl & FLOW_CTRL_RX)
1444                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1445         else
1446                 miireg = 0;
1447
1448         return miireg;
1449 }
1450
1451 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1452 {
1453         u8 cap = 0;
1454
1455         if (lcladv & ADVERTISE_1000XPAUSE) {
1456                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1457                         if (rmtadv & LPA_1000XPAUSE)
1458                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1459                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1460                                 cap = FLOW_CTRL_RX;
1461                 } else {
1462                         if (rmtadv & LPA_1000XPAUSE)
1463                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1464                 }
1465         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1466                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1467                         cap = FLOW_CTRL_TX;
1468         }
1469
1470         return cap;
1471 }
1472
/* Apply the resolved (or forced) flow-control settings to the MAC.
 *
 * When autonegotiation with pause autoneg is in effect, the active
 * flow control is resolved from the local/remote advertisements
 * (@lcladv/@rmtadv); otherwise the statically configured value is
 * used.  MAC_RX_MODE/MAC_TX_MODE are rewritten only when the enable
 * bit actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes uses the 1000BASE-X pause resolution; copper
		 * uses the standard mii helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1511
/* phylib adjust_link callback: bring the MAC configuration in line with
 * the PHY's current link state (port mode, duplex, flow control, MI
 * status, TX slot timing), then emit a link report if anything that the
 * user can observe has changed.  Runs under tp->lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select the MAC port mode to match the PHY speed.
		 * Non-5785 chips use GMII mode for anything that isn't
		 * explicitly 10/100.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from what we would
			 * advertise and what the partner reported.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 needs the MI status block told about 10 Mbps mode. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs an extended slot time; everything else uses
	 * the standard value.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report if link state, speed, duplex or flow control changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1595
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change callback, and masks the PHY's advertised features down to
 * what the MAC supports.  Idempotent: returns 0 immediately if already
 * connected.  Returns the phy_connect() error or -EINVAL for an
 * unsupported interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1643
/* (Re)start the PHY state machine.  When resuming from low-power mode,
 * the saved link configuration (speed/duplex/autoneg/advertising) is
 * restored to the phy_device first, then autonegotiation is kicked off.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
/* Adjust Energy Efficient Ethernet state to match the current link.
 * Arms the LPI countdown (tp->setlpicnt) when the link partner
 * resolved EEE at 100/1000 full duplex; otherwise makes sure LPI is
 * disabled in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* The LPI exit timer depends on the negotiated speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the EEE resolution status from the AN MMD. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not in use on this link: clear DSP TAP26 (requires
		 * the SMDSP window) and turn LPI off in the CPMU.
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1871
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874         u32 val;
1875
1876         if (tp->link_config.active_speed == SPEED_1000 &&
1877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881                 val = MII_TG3_DSP_TAP26_ALNOKO |
1882                       MII_TG3_DSP_TAP26_RMRXSTO;
1883                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885         }
1886
1887         val = tr32(TG3_CPMU_EEE_MODE);
1888         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893         int limit = 100;
1894
1895         while (limit--) {
1896                 u32 tmp32;
1897
1898                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899                         if ((tmp32 & 0x1000) == 0)
1900                                 break;
1901                 }
1902         }
1903         if (limit < 0)
1904                 return -EBUSY;
1905
1906         return 0;
1907 }
1908
/* Write a known test pattern into each of the PHY DSP's four channel
 * blocks and read it back to verify the DSP memory.  On any timeout
 * or mismatch, *resetp is set so the caller can retry after another
 * PHY reset.  Returns 0 on success, -EBUSY on failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block and select write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the writes and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the block and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Each pattern word comes back as a low/high word pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP (magic sequence
				 * per vendor workaround) and report failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977         int chan;
1978
1979         for (chan = 0; chan < 4; chan++) {
1980                 int i;
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983                              (chan * 0x2000) | 0x0200);
1984                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985                 for (i = 0; i < 6; i++)
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1987                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988                 if (tg3_wait_macro_done(tp))
1989                         return -EBUSY;
1990         }
1991
1992         return 0;
1993 }
1994
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997         u32 reg32, phy9_orig;
1998         int retries, do_phy_reset, err;
1999
2000         retries = 10;
2001         do_phy_reset = 1;
2002         do {
2003                 if (do_phy_reset) {
2004                         err = tg3_bmcr_reset(tp);
2005                         if (err)
2006                                 return err;
2007                         do_phy_reset = 0;
2008                 }
2009
2010                 /* Disable transmitter and interrupt.  */
2011                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012                         continue;
2013
2014                 reg32 |= 0x3000;
2015                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017                 /* Set full-duplex, 1000 mbps.  */
2018                 tg3_writephy(tp, MII_BMCR,
2019                              BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021                 /* Set to master mode.  */
2022                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023                         continue;
2024
2025                 tg3_writephy(tp, MII_CTRL1000,
2026                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029                 if (err)
2030                         return err;
2031
2032                 /* Block the PHY control access.  */
2033                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036                 if (!err)
2037                         break;
2038         } while (--retries);
2039
2040         err = tg3_phy_reset_chanpat(tp);
2041         if (err)
2042                 return err;
2043
2044         tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
2051         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054                 reg32 &= ~0x3000;
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056         } else if (!err)
2057                 err = -EBUSY;
2058
2059         return err;
2060 }
2061
/* Unconditionally reset the tigon3 PHY and apply the chip-specific
 * workarounds that must follow a PHY reset.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Bring the 5906 internal PHY out of IDDQ power-down
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it down before proceeding. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset dance
		 * instead of a plain BMCR reset.
		 */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily clear 10MB-RX-only mode around the reset. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock selection if it was left set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Post-reset workarounds keyed off the per-PHY bug flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice deliberately — presumably required by the
		 * 5704 A0 erratum; confirm against vendor documentation.
		 */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2202
2203 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2205 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2206                                           TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 12))
2218
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221         u32 status, shift;
2222
2223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226         else
2227                 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230         status &= ~(TG3_GPIO_MSG_MASK << shift);
2231         status |= (newstat << shift);
2232
2233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236         else
2237                 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239         return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
2241
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244         if (!tg3_flag(tp, IS_NIC))
2245                 return 0;
2246
2247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251                         return -EIO;
2252
2253                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259         } else {
2260                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2262         }
2263
2264         return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269         u32 grc_local_ctrl;
2270
2271         if (!tg3_flag(tp, IS_NIC) ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274                 return;
2275
2276         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282         tw32_wait_f(GRC_LOCAL_CTRL,
2283                     grc_local_ctrl,
2284                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286         tw32_wait_f(GRC_LOCAL_CTRL,
2287                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
/* Switch the NIC's power source to auxiliary power (Vaux) by stepping
 * the GRC local-control GPIOs through a chip-specific sequence.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write drives GPIO0/GPIO1 directly. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 in a second step. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO2 where it is usable. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370         u32 msg = 0;
2371
2372         /* Serialize power state transitions */
2373         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374                 return;
2375
2376         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377                 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379         msg = tg3_set_function_status(tp, msg);
2380
2381         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382                 goto done;
2383
2384         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385                 tg3_pwrsrc_switch_to_vaux(tp);
2386         else
2387                 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
/* Decide whether the NIC should run from auxiliary power (Vaux) or
 * main power (Vmain), considering ASF/WoL needs of this device and,
 * on dual-port boards, of its peer device.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* Multi-function chips coordinate through the APE. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer owns the decision. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2437
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441                 return 1;
2442         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443                 if (speed != SPEED_10)
2444                         return 1;
2445         } else if (speed == SPEED_10)
2446                 return 1;
2447
2448         return 0;
2449 }
2450
2451 static int tg3_setup_phy(struct tg3 *, int);
2452
2453 #define RESET_KIND_SHUTDOWN     0
2454 #define RESET_KIND_INIT         1
2455 #define RESET_KIND_SUSPEND      2
2456
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2459
/* Place the PHY into its lowest-power state appropriate for this chip.
 * @do_low_power: when true, also program aggressive low-power auxctl
 *                settings before the final power-down write (ignored on
 *                the serdes, 5906 and FET paths, which return early).
 *
 * Some chip revisions must never see BMCR_PDOWN due to hardware bugs;
 * those paths bail out before the final write.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then force the ethernet PHY into
		 * IDDQ (deep power-down) via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow-register access so the standby
			 * power-down bit in AUXMODE4 can be set, then
			 * restore the original FET_TEST value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000Mb MAC clock to 12.5MHz before power-down */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2532
2533 /* tp->lock is held. */
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, NVRAM)) {
2537                 int i;
2538
2539                 if (tp->nvram_lock_cnt == 0) {
2540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541                         for (i = 0; i < 8000; i++) {
2542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543                                         break;
2544                                 udelay(20);
2545                         }
2546                         if (i == 8000) {
2547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548                                 return -ENODEV;
2549                         }
2550                 }
2551                 tp->nvram_lock_cnt++;
2552         }
2553         return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559         if (tg3_flag(tp, NVRAM)) {
2560                 if (tp->nvram_lock_cnt > 0)
2561                         tp->nvram_lock_cnt--;
2562                 if (tp->nvram_lock_cnt == 0)
2563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564         }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571                 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574         }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581                 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584         }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588                                         u32 offset, u32 *val)
2589 {
2590         u32 tmp;
2591         int i;
2592
2593         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594                 return -EINVAL;
2595
2596         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597                                         EEPROM_ADDR_DEVID_MASK |
2598                                         EEPROM_ADDR_READ);
2599         tw32(GRC_EEPROM_ADDR,
2600              tmp |
2601              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603               EEPROM_ADDR_ADDR_MASK) |
2604              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606         for (i = 0; i < 1000; i++) {
2607                 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609                 if (tmp & EEPROM_ADDR_COMPLETE)
2610                         break;
2611                 msleep(1);
2612         }
2613         if (!(tmp & EEPROM_ADDR_COMPLETE))
2614                 return -EBUSY;
2615
2616         tmp = tr32(GRC_EEPROM_DATA);
2617
2618         /*
2619          * The data will always be opposite the native endian
2620          * format.  Perform a blind byteswap to compensate.
2621          */
2622         *val = swab32(tmp);
2623
2624         return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
2628
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631         int i;
2632
2633         tw32(NVRAM_CMD, nvram_cmd);
2634         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635                 udelay(10);
2636                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637                         udelay(10);
2638                         break;
2639                 }
2640         }
2641
2642         if (i == NVRAM_CMD_TIMEOUT)
2643                 return -EBUSY;
2644
2645         return 0;
2646 }
2647
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650         if (tg3_flag(tp, NVRAM) &&
2651             tg3_flag(tp, NVRAM_BUFFERED) &&
2652             tg3_flag(tp, FLASH) &&
2653             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654             (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656                 addr = ((addr / tp->nvram_pagesize) <<
2657                         ATMEL_AT45DB0X1B_PAGE_POS) +
2658                        (addr % tp->nvram_pagesize);
2659
2660         return addr;
2661 }
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665         if (tg3_flag(tp, NVRAM) &&
2666             tg3_flag(tp, NVRAM_BUFFERED) &&
2667             tg3_flag(tp, FLASH) &&
2668             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669             (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672                         tp->nvram_pagesize) +
2673                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675         return addr;
2676 }
2677
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Returns 0 with *val filled on success, or a negative errno from
 * address validation, lock acquisition, or command execution.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Legacy parts without an NVRAM interface use the slower
	 * EEPROM access registers instead.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Acquire hardware arbitration before touching the interface */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719         u32 v;
2720         int res = tg3_nvram_read(tp, offset, &v);
2721         if (!res)
2722                 *val = cpu_to_be32(v);
2723         return res;
2724 }
2725
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729         u32 addr_high, addr_low;
2730         int i;
2731
2732         addr_high = ((tp->dev->dev_addr[0] << 8) |
2733                      tp->dev->dev_addr[1]);
2734         addr_low = ((tp->dev->dev_addr[2] << 24) |
2735                     (tp->dev->dev_addr[3] << 16) |
2736                     (tp->dev->dev_addr[4] <<  8) |
2737                     (tp->dev->dev_addr[5] <<  0));
2738         for (i = 0; i < 4; i++) {
2739                 if (i == 1 && skip_mac_1)
2740                         continue;
2741                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747                 for (i = 0; i < 12; i++) {
2748                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750                 }
2751         }
2752
2753         addr_high = (tp->dev->dev_addr[0] +
2754                      tp->dev->dev_addr[1] +
2755                      tp->dev->dev_addr[2] +
2756                      tp->dev->dev_addr[3] +
2757                      tp->dev->dev_addr[4] +
2758                      tp->dev->dev_addr[5]) &
2759                 TX_BACKOFF_SEED_MASK;
2760         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
2762
/* Restore the saved MISC_HOST_CTRL config-space setting after a power
 * transition.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775         int err;
2776
2777         tg3_enable_register_access(tp);
2778
2779         err = pci_set_power_state(tp->pdev, PCI_D0);
2780         if (!err) {
2781                 /* Switch out of Vaux if it is a NIC */
2782                 tg3_pwrsrc_switch_to_vmain(tp);
2783         } else {
2784                 netdev_err(tp->dev, "Transition to D0 failed\n");
2785         }
2786
2787         return err;
2788 }
2789
/* Prepare the chip for entry into a low-power state (suspend/WOL).
 * Saves link settings, reprograms the PHY and MAC for wake-on-LAN or
 * full power-down depending on WOL/ASF configuration, then gates the
 * appropriate clocks per chip family.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is suspended */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save current link settings for restore on resume */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHYs need driver-managed
			 * low-power programming rather than phylib's.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Drop to 10/half autoneg for minimum power */
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox handshake */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating / PLL power-down policy varies by chip family */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two steps, waiting 40us after each write */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* PHY stays powered if it must service wake or ASF traffic */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3035
/* Final power-down: prepare the chip, arm PCI wake if WOL is enabled,
 * then drop the device into D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3043
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047         case MII_TG3_AUX_STAT_10HALF:
3048                 *speed = SPEED_10;
3049                 *duplex = DUPLEX_HALF;
3050                 break;
3051
3052         case MII_TG3_AUX_STAT_10FULL:
3053                 *speed = SPEED_10;
3054                 *duplex = DUPLEX_FULL;
3055                 break;
3056
3057         case MII_TG3_AUX_STAT_100HALF:
3058                 *speed = SPEED_100;
3059                 *duplex = DUPLEX_HALF;
3060                 break;
3061
3062         case MII_TG3_AUX_STAT_100FULL:
3063                 *speed = SPEED_100;
3064                 *duplex = DUPLEX_FULL;
3065                 break;
3066
3067         case MII_TG3_AUX_STAT_1000HALF:
3068                 *speed = SPEED_1000;
3069                 *duplex = DUPLEX_HALF;
3070                 break;
3071
3072         case MII_TG3_AUX_STAT_1000FULL:
3073                 *speed = SPEED_1000;
3074                 *duplex = DUPLEX_FULL;
3075                 break;
3076
3077         default:
3078                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080                                  SPEED_10;
3081                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082                                   DUPLEX_HALF;
3083                         break;
3084                 }
3085                 *speed = SPEED_INVALID;
3086                 *duplex = DUPLEX_INVALID;
3087                 break;
3088         }
3089 }
3090
/* Program the PHY autonegotiation advertisement registers.
 * @advertise: ADVERTISED_* link modes to offer.
 * @flowctrl:  FLOW_CTRL_TX/RX pause configuration.
 *
 * Also configures EEE advertisement on EEE-capable PHYs.  Returns 0
 * on success or the first PHY access error encountered.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	/* 5701 A0/B0: force master mode on gigabit links */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reprogramming the EEE advertisement */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Restore SMDSP access; report the first error seen */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3175
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178         u32 new_adv;
3179         int i;
3180
3181         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182                 new_adv = ADVERTISED_10baseT_Half |
3183                           ADVERTISED_10baseT_Full;
3184                 if (tg3_flag(tp, WOL_SPEED_100MB))
3185                         new_adv |= ADVERTISED_100baseT_Half |
3186                                    ADVERTISED_100baseT_Full;
3187
3188                 tg3_phy_autoneg_cfg(tp, new_adv,
3189                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3190         } else if (tp->link_config.speed == SPEED_INVALID) {
3191                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192                         tp->link_config.advertising &=
3193                                 ~(ADVERTISED_1000baseT_Half |
3194                                   ADVERTISED_1000baseT_Full);
3195
3196                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197                                     tp->link_config.flowctrl);
3198         } else {
3199                 /* Asking for a specific link mode. */
3200                 if (tp->link_config.speed == SPEED_1000) {
3201                         if (tp->link_config.duplex == DUPLEX_FULL)
3202                                 new_adv = ADVERTISED_1000baseT_Full;
3203                         else
3204                                 new_adv = ADVERTISED_1000baseT_Half;
3205                 } else if (tp->link_config.speed == SPEED_100) {
3206                         if (tp->link_config.duplex == DUPLEX_FULL)
3207                                 new_adv = ADVERTISED_100baseT_Full;
3208                         else
3209                                 new_adv = ADVERTISED_100baseT_Half;
3210                 } else {
3211                         if (tp->link_config.duplex == DUPLEX_FULL)
3212                                 new_adv = ADVERTISED_10baseT_Full;
3213                         else
3214                                 new_adv = ADVERTISED_10baseT_Half;
3215                 }
3216
3217                 tg3_phy_autoneg_cfg(tp, new_adv,
3218                                     tp->link_config.flowctrl);
3219         }
3220
3221         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222             tp->link_config.speed != SPEED_INVALID) {
3223                 u32 bmcr, orig_bmcr;
3224
3225                 tp->link_config.active_speed = tp->link_config.speed;
3226                 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228                 bmcr = 0;
3229                 switch (tp->link_config.speed) {
3230                 default:
3231                 case SPEED_10:
3232                         break;
3233
3234                 case SPEED_100:
3235                         bmcr |= BMCR_SPEED100;
3236                         break;
3237
3238                 case SPEED_1000:
3239                         bmcr |= BMCR_SPEED1000;
3240                         break;
3241                 }
3242
3243                 if (tp->link_config.duplex == DUPLEX_FULL)
3244                         bmcr |= BMCR_FULLDPLX;
3245
3246                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247                     (bmcr != orig_bmcr)) {
3248                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249                         for (i = 0; i < 1500; i++) {
3250                                 u32 tmp;
3251
3252                                 udelay(10);
3253                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254                                     tg3_readphy(tp, MII_BMSR, &tmp))
3255                                         continue;
3256                                 if (!(tmp & BMSR_LSTATUS)) {
3257                                         udelay(40);
3258                                         break;
3259                                 }
3260                         }
3261                         tg3_writephy(tp, MII_BMCR, bmcr);
3262                         udelay(40);
3263                 }
3264         } else {
3265                 tg3_writephy(tp, MII_BMCR,
3266                              BMCR_ANENABLE | BMCR_ANRESTART);
3267         }
3268 }
3269
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272         int err;
3273
3274         /* Turn off tap power management. */
3275         /* Set Extended packet length bit */
3276         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284         udelay(40);
3285
3286         return err;
3287 }
3288
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291         u32 adv_reg, all_mask = 0;
3292
3293         if (mask & ADVERTISED_10baseT_Half)
3294                 all_mask |= ADVERTISE_10HALF;
3295         if (mask & ADVERTISED_10baseT_Full)
3296                 all_mask |= ADVERTISE_10FULL;
3297         if (mask & ADVERTISED_100baseT_Half)
3298                 all_mask |= ADVERTISE_100HALF;
3299         if (mask & ADVERTISED_100baseT_Full)
3300                 all_mask |= ADVERTISE_100FULL;
3301
3302         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303                 return 0;
3304
3305         if ((adv_reg & all_mask) != all_mask)
3306                 return 0;
3307         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308                 u32 tg3_ctrl;
3309
3310                 all_mask = 0;
3311                 if (mask & ADVERTISED_1000baseT_Half)
3312                         all_mask |= ADVERTISE_1000HALF;
3313                 if (mask & ADVERTISED_1000baseT_Full)
3314                         all_mask |= ADVERTISE_1000FULL;
3315
3316                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317                         return 0;
3318
3319                 if ((tg3_ctrl & all_mask) != all_mask)
3320                         return 0;
3321         }
3322         return 1;
3323 }
3324
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327         u32 curadv, reqadv;
3328
3329         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330                 return 1;
3331
3332         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336                 if (curadv != reqadv)
3337                         return 0;
3338
3339                 if (tg3_flag(tp, PAUSE_AUTONEG))
3340                         tg3_readphy(tp, MII_LPA, rmtadv);
3341         } else {
3342                 /* Reprogram the advertisement register, even if it
3343                  * does not affect the current link.  If the link
3344                  * gets renegotiated in the future, we can save an
3345                  * additional renegotiation cycle by advertising
3346                  * it correctly in the first place.
3347                  */
3348                 if (curadv != reqadv) {
3349                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350                                      ADVERTISE_PAUSE_ASYM);
3351                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352                 }
3353         }
3354
3355         return 1;
3356 }
3357
/* Bring up / re-evaluate the link on a copper PHY.
 *
 * Applies per-chip PHY workarounds, polls the PHY for link, resolves
 * speed/duplex/flow-control, programs the MAC mode registers to match,
 * and updates the net_device carrier state.  @force_reset forces a PHY
 * reset before the link is examined.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Quiesce MAC events and ack any pending status bits before
         * touching the PHY.
         */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Suspend MI auto-polling so our manual MDIO accesses below
         * don't collide with the hardware poller.
         */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR is read twice: link status is latched, so the
                 * second read reflects the current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        /* BCM5401: re-run the DSP workaround whenever the link is down,
         * and wait (up to ~10ms) for it to come back.
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 at gigabit may need a full reset
                         * plus another DSP reload to regain link.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        /* Unmask only the link-change interrupt when using MI
         * interrupts; otherwise mask everything (non-FET PHYs).
         */
        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        /* 5700/5701: select the LED mode the platform configured. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* Capacitively-coupled PHYs: make sure bit 10 of the MISCTEST
         * shadow register is set; if we had to set it, restart the
         * link bring-up from scratch.
         */
        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll for link (up to ~4ms). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for the aux status register to report a
                 * non-zero (resolved) speed/duplex.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Wait for a sane BMCR value (0x7fff looks like a bus
                 * glitch / all-ones read).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                /* The link counts as "up" only if the PHY's current
                 * configuration matches what was requested.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_copper_is_advertising_all(tp,
                                                tp->link_config.advertising)) {
                                if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
                                                                  &rmt_adv))
                                        current_link_up = 1;
                        }
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL)
                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        }

relink:
        /* No usable link (or low-power re-entry): reprogram the PHY
         * and check once more whether link came up meanwhile.
         */
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Program MAC port mode (MII for 10/100, GMII for gigabit). */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X / fast PCI: notify firmware via
         * the mailbox after re-acking status.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* CLKREQ must be off at 10/100, on otherwise. */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                              newlnkctl);
        }

        /* Propagate link state to the stack and log a change. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
3636
/* Software state for the fiber auto-negotiation state machine driven
 * by tg3_fiber_aneg_smachine().  The MR_* flags mirror the management
 * register variables of the IEEE 802.3 Clause 37 arbitration process.
 */
struct tg3_fiber_aneginfo {
        int state;              /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;              /* MR_* control and result bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters (units of state-machine invocations). */
        unsigned long link_time, cur_time;

        /* Last config word seen and how many consecutive ticks it has
         * been stable.
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Match flags derived from the received config stream. */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig; /* ANEG_CFG_* bit layout below */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* tg3_fiber_aneg_smachine() return codes. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
3700
/* Advance the software fiber auto-negotiation state machine by one
 * tick.  Samples the received config word from the MAC, updates the
 * match detectors in @ap, then runs one step of the Clause-37-style
 * arbitration.  Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when
 * the caller should keep ticking on a timer, ANEG_DONE on completion,
 * or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: reset all match/timer state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the incoming config word and update the ability/ack/
         * idle match detectors.  ability_match requires the same
         * non-idle config to be seen on consecutive ticks.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Restart negotiation from a clean slate. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start transmitting an all-zero config word. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold for the settle time before advertising. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus the configured pause
                 * capabilities.
                 */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until the partner sends a stable non-zero
                 * config word.
                 */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner went back to idle: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner's advertised abilities into the
                 * MR_LP_ADV_* flags.
                 */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next pages are not implemented; only
                                 * succeed if neither side needs them.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words; wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
3952
/* Perform 1000BASE-X autonegotiation in software.
 *
 * Puts the port into GMII mode, enables transmission of config code
 * words (MAC_MODE_SEND_CONFIGS), then single-steps the software aneg
 * state machine roughly once per microsecond for up to 195000 ticks
 * before giving up.
 *
 * @tp:      device private data
 * @txflags: out - the config word the state machine transmitted
 * @rxflags: out - MR_* status flags accumulated during negotiation
 *
 * Returns 1 when the state machine finished (ANEG_DONE) and at least
 * one of MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set;
 * 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously latched TX config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the port to GMII mode for the negotiation. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Start sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195 ms worst case at one state-machine step per microsecond. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Negotiation finished (or timed out): stop sending configs. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
3997
/* Initialize / reset the BCM8002 SerDes PHY.
 *
 * If init has already completed and PCS sync is currently lost, the
 * PHY is left alone; i.e. we only reset when initting the first time
 * or when we have a link.  The writes below use raw, vendor-specific
 * PHY register numbers (0x10/0x11/0x13/0x16/0x18); the inline
 * comments describe their intent as inherited from the original
 * Broadcom driver.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4047
/* Fiber link setup for devices whose SG_DIG block performs
 * autonegotiation in hardware.
 *
 * @tp:         device private data
 * @mac_status: snapshot of MAC_STATUS taken by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 *
 * With autoneg disabled, any active hardware autoneg is torn down
 * and link is declared up as soon as PCS sync is present.  With
 * autoneg enabled, the expected SG_DIG_CTRL value (including pause
 * bits derived from tp->link_config.flowctrl) is computed; if the
 * register disagrees, autoneg is (re)started via a soft reset.
 * Otherwise completion is checked, flow control is resolved from the
 * local/partner pause bits, and if autoneg fails to complete before
 * tp->serdes_counter runs out, parallel detection is attempted.
 *
 * The SERDES config "workaround" path applies to all chips except
 * 5704 A0/A1 and reprograms MAC_SERDES_CFG around autoneg restarts,
 * preserving the pre-emphasis and voltage-regulator bits.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if HW autoneg is still on, turn it off. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link up while its countdown
		 * runs, provided we still have PCS sync and are not
		 * receiving config code words.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Soft-reset, then start hardware autoneg. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Reconstruct the advertised/received pause bits
			 * so flow control can be resolved.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: fall back to parallel
				 * detection by disabling HW autoneg.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg countdown. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4189
/* Fiber link setup for devices without the SG_DIG hardware autoneg
 * block: negotiation, when enabled, runs through the software state
 * machine in fiber_autoneg().
 *
 * @tp:         device private data
 * @mac_status: snapshot of MAC_STATUS taken by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 * Nothing is attempted unless PCS sync is present.  With autoneg
 * disabled, a 1000FD link is simply forced.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the software aneg result into MII-style
			 * pause bits for flow-control resolution.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-changed attention bits until they
		 * stay clear (up to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Even if autoneg did not complete, consider the link up
		 * when we have sync and no incoming config words
		 * (parallel-detect style).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4251
/* tg3_setup_phy() worker for TBI/fiber (SerDes) devices.
 *
 * Fast path: when not using hardware autoneg, with carrier up and
 * init complete, a clean MAC_STATUS (PCS synced + signal detect,
 * no pending changes) means nothing to do - just ack the change
 * bits and return.  Otherwise the MAC is reprogrammed for TBI mode,
 * link setup is delegated to the hardware (SG_DIG) or software
 * helper, and carrier state, link LEDs and active speed/duplex are
 * updated.  A link report is generated on any carrier or
 * flow-control/speed/duplex change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can detect changes later. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the port into TBI mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated but clear the link-change bit. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack lingering change attention bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * provoke the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged - still report if flow control,
		 * speed or duplex changed.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4359
/* tg3_setup_phy() worker for fiber devices driven through an MII
 * register interface (e.g. 5714S-class parts).
 *
 * Programs the advertisement and BMCR for the configured autoneg/
 * forced mode, then derives link, speed and duplex from BMSR/BMCR
 * (and, when autoneg is on, from the ADVERTISE/LPA intersection),
 * resolves flow control, and updates carrier state.
 *
 * Returns the accumulated (OR-ed) error status of the tg3_readphy()
 * calls made along the way.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: the link-status bit is latched, the second
	 * read reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714, trust the MAC's TX status for link state. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and let the serdes countdown
			 * handle completion.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: set duplex by hand. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the speed advertisement and
				 * restart autoneg to drop the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample link state (latched bit: read twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common subset of both
			 * sides' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4531
/* Periodic worker handling parallel detection for MII SerDes
 * devices.
 *
 * While tp->serdes_counter is nonzero, just count down so autoneg
 * gets time to finish.  Once it hits zero:
 *  - no carrier + autoneg enabled: if the PHY shows signal detect
 *    (shadow reg 0x1f, bit 4) but is not receiving config code
 *    words (expansion interrupt status, bit 5), force 1000FD with
 *    autoneg off and mark TG3_PHYFLG_PARALLEL_DETECT;
 *  - carrier up via parallel detect: if config code words start
 *    arriving (bit 5 set), re-enable autoneg and clear the flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice - NOTE(review): presumably the first
			 * read clears latched bits; confirm against the
			 * PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4591
/* Top-level link setup.
 *
 * Dispatches to the SerDes, MII-SerDes or copper worker according
 * to the PHY type, then applies link-dependent fixups:
 *  - 5784 AX: reprogram the GRC clock prescaler from the current
 *    MAC clock;
 *  - MAC TX lengths: longer slot time (0xff) for 1000/half, the
 *    normal 32 otherwise (5720 additionally preserves the jumbo
 *    frame length and countdown fields);
 *  - pre-5705 parts: disable statistics coalescing ticks while the
 *    link is down;
 *  - ASPM workaround: adjust the PCIe L1 power-management threshold
 *    depending on carrier state.
 *
 * Returns the error code from the PHY-specific worker.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler that matches the MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Stats block coalescing only while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4656
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4658 {
4659         return tp->irq_sync;
4660 }
4661
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664         int i;
4665
4666         dst = (u32 *)((u8 *)dst + off);
4667         for (i = 0; i < len; i += sizeof(u32))
4668                 *dst++ = tr32(off + i);
4669 }
4670
/* Fill @regs with a dump of the legacy (non-PCIe) register blocks.
 *
 * Each tg3_rd32_loop() call stores a register window at the same
 * byte offset within @regs as the window's offset in the device
 * register map, so the buffer mirrors the hardware layout.  Blocks
 * that only exist behind a feature flag (MSI-X vectors, TX CPU on
 * pre-5705 parts, NVRAM) are read conditionally.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4720
/* Dump the chip register block and per-vector status/ring state to the
 * kernel log.  Called from error paths, so the scratch buffer is
 * allocated GFP_ATOMIC and the register dump is skipped (with a
 * message) if that allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line; all-zero groups of four (the
	 * buffer was kzalloc'd) are elided to keep the log readable.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI bookkeeping for the same vector */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4778
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reordering workaround is already active (flag set or
	 * indirect mailbox writes in use), a bogus tx completion points
	 * to a real driver bug, not chipset reordering.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Only flag the condition here; the chip reset itself runs
	 * later from the workqueue (see header comment).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803         /* Tell compiler to fetch tx indices from memory. */
4804         barrier();
4805         return tnapi->tx_pending -
4806                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
4808
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, napi vector i services tx queue i - 1, hence the
	 * decrement of the queue index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk completed slots from our consumer up to the hardware
	 * consumer, unmapping and freeing each packet.
	 */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the driver's and
		 * the hardware's views of the ring have diverged --
		 * trigger recovery (likely MMIO write reordering).
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Fragments occupy the following ring slots; only the
		 * head slot carries the skb pointer, so a non-NULL skb
		 * here (or running past hw_idx) is a ring corruption.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue only after re-checking under the tx lock to
	 * avoid racing with a concurrent tg3_start_xmit() stopping it.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4883
4884 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4885 {
4886         if (!ri->skb)
4887                 return;
4888
4889         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4890                          map_sz, PCI_DMA_FROMDEVICE);
4891         dev_kfree_skb_any(ri->skb);
4892         ri->skb = NULL;
4893 }
4894
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, bookkeeping slot and buffer size for the
	 * ring named by the opaque cookie; reject unknown cookies.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit: record the skb and its mapping, then publish the DMA
	 * address to the chip through the descriptor.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4961
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Re-posts the buffer at src_idx of the source (napi[0]) producer
 * ring set into the destination set at dest_idx_unmasked, moving both
 * the skb bookkeeping and the DMA address.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Resolve source/destination descriptor and bookkeeping slots
	 * for the ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown cookie: silently drop the recycle request. */
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
5011
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	/* Process status-ring entries up to the chip's producer index,
	 * bounded by the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (and
		 * slot) the buffer came from; look up its skb and DMA
		 * address there.  Unknown cookies are skipped without
		 * posting a replacement.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except odd-nibble MII receives)
		 * and recycle their buffers back to the producer ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: hand the existing buffer up the
			 * stack and post a freshly allocated one in its
			 * place.  If allocation fails, drop and recycle.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			/* Small frame: copy into a fresh skb and reuse
			 * the original ring buffer.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the device has
		 * RXCSUM enabled and the chip verified TCP/UDP csum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop over-length frames unless they carry a VLAN tag
		 * (which accounts for the extra bytes).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the std producer index so the
		 * chip does not run out of buffers mid-burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Non-RSS: this vector owns the producer rings; write
		 * the updated indices straight to the mailboxes.
		 */
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* RSS: napi[1] transfers per-vector buffers back to the
		 * chip-visible rings; kick it if we are not it.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5218
5219 static void tg3_poll_link(struct tg3 *tp)
5220 {
5221         /* handle link change and other phy events */
5222         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5223                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5224
5225                 if (sblk->status & SD_STATUS_LINK_CHG) {
5226                         sblk->status = SD_STATUS_UPDATED |
5227                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5228                         spin_lock(&tp->lock);
5229                         if (tg3_flag(tp, USE_PHYLIB)) {
5230                                 tw32_f(MAC_STATUS,
5231                                      (MAC_STATUS_SYNC_CHANGED |
5232                                       MAC_STATUS_CFG_CHANGED |
5233                                       MAC_STATUS_MI_COMPLETION |
5234                                       MAC_STATUS_LNKSTATE_CHANGED));
5235                                 udelay(40);
5236                         } else
5237                                 tg3_setup_phy(tp, 0);
5238                         spin_unlock(&tp->lock);
5239                 }
5240         }
5241 }
5242
/* Transfer freshly recycled rx buffers from the source producer ring
 * set @spr to the destination set @dpr, for both the standard and the
 * jumbo rings.  Both the bookkeeping entries and the descriptor DMA
 * addresses are copied.  Returns 0 on success, or -ENOSPC if a
 * destination slot was still occupied (the free run before it is
 * still transferred).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring: drain the source consumer up to its producer. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy at most up to the ring wrap point per pass. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate the copy at the first destination slot that
		 * still holds an skb.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring: same algorithm as above, on the jumbo members. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5368
/* Do one round of tx completion and rx processing for one napi
 * vector.  Returns the updated work_done count; rx work is bounded by
 * the remaining budget.  Bails out early (for the caller to handle)
 * if tx processing flagged TX_RECOVERY_PENDING.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, vector 1 additionally collects recycled buffers
	 * from every other vector into napi[0]'s producer rings and
	 * publishes the new producer indices to the chip.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Buffer contents must be visible before the mailbox
		 * writes below expose the new producer indices.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failure (-ENOSPC) leaves work queued; force
		 * another pass via the coalescing mode register.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5415
/* NAPI poll handler for MSI-X vectors.  Loops until the budget is
 * exhausted or no rx/tx work remains, then completes NAPI and
 * re-enables the interrupt by writing the last seen status tag to the
 * vector's mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* Abort polling and schedule a chip reset instead. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5459
5460 static void tg3_process_error(struct tg3 *tp)
5461 {
5462         u32 val;
5463         bool real_error = false;
5464
5465         if (tg3_flag(tp, ERROR_PROCESSED))
5466                 return;
5467
5468         /* Check Flow Attention register */
5469         val = tr32(HOSTCC_FLOW_ATTN);
5470         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5471                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5472                 real_error = true;
5473         }
5474
5475         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5476                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5477                 real_error = true;
5478         }
5479
5480         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5481                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5482                 real_error = true;
5483         }
5484
5485         if (!real_error)
5486                 return;
5487
5488         tg3_dump_state(tp);
5489
5490         tg3_flag_set(tp, ERROR_PROCESSED);
5491         schedule_work(&tp->reset_task);
5492 }
5493
/* NAPI poll handler for vector 0 (the only vector in INTx/MSI mode).
 * Besides rx/tx work it also services link events and status-block
 * error indications.  Returns the number of packets processed
 * (work_done <= budget).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* Tx recovery requested: hand off to the reset task. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5541
5542 static void tg3_napi_disable(struct tg3 *tp)
5543 {
5544         int i;
5545
5546         for (i = tp->irq_cnt - 1; i >= 0; i--)
5547                 napi_disable(&tp->napi[i].napi);
5548 }
5549
5550 static void tg3_napi_enable(struct tg3 *tp)
5551 {
5552         int i;
5553
5554         for (i = 0; i < tp->irq_cnt; i++)
5555                 napi_enable(&tp->napi[i].napi);
5556 }
5557
5558 static void tg3_napi_init(struct tg3 *tp)
5559 {
5560         int i;
5561
5562         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5563         for (i = 1; i < tp->irq_cnt; i++)
5564                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5565 }
5566
5567 static void tg3_napi_fini(struct tg3 *tp)
5568 {
5569         int i;
5570
5571         for (i = 0; i < tp->irq_cnt; i++)
5572                 netif_napi_del(&tp->napi[i].napi);
5573 }
5574
/* Quiesce the transmit path: refresh trans_start first so the netdev
 * watchdog does not see a stale timestamp and declare a tx timeout
 * while the queues are deliberately stopped, then disable NAPI and
 * all tx queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5581
/* Restart the transmit path after a reset/reconfiguration.  Forces the
 * status block's "updated" bit on so that the first poll after
 * tg3_enable_ints() sees pending work.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5594
/* Mark the driver as synchronizing with its interrupt handlers and
 * wait for any handler already running on another CPU to complete.
 * The smp_mb() orders the irq_sync store before the synchronize_irq()
 * waits so handlers observe it.  Must not be called with irq_sync
 * already set (BUG otherwise).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5607
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 * Takes tp->lock with bottom halves disabled; pair with
 * tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5619
/* Release the lock taken by tg3_full_lock().  Does not undo IRQ
 * quiescing; tp->irq_sync is cleared separately (see tg3_restart_hw).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5624
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Do not schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5642
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Do not schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
5668
/* INTx interrupt handler for chips using non-tagged status blocks.
 * May share the IRQ line; returns IRQ_NONE (handled = 0) when the
 * interrupt was not raised by this device.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Ack the status block before checking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5717
/* INTx interrupt handler for chips using tagged status blocks: an
 * unchanged status_tag (vs. last_irq_tag) means no new event, so the
 * interrupt can be disclaimed on a shared line.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5769
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* The interrupt is ours if the status block was updated or the
	 * INTx line is asserted in the PCI state register.  Disable
	 * interrupts so the test interrupt fires only once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
5784
5785 static int tg3_init_hw(struct tg3 *, int);
5786 static int tg3_halt(struct tg3 *, int, int);
5787
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success or the negative
 * error from tg3_init_hw(); on failure the device is halted and
 * closed.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Init failed: shut the chip down and close the device.
		 * dev_close() can sleep, so tp->lock is dropped around it
		 * and re-taken afterwards (per the sparse annotations
		 * above).
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5811
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler for every vector so the
 * device can be serviced with normal interrupt delivery unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i = 0;

	while (i < tp->irq_cnt) {
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
		i++;
	}
}
#endif
5822
5823 static void tg3_reset_task(struct work_struct *work)
5824 {
5825         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5826         int err;
5827         unsigned int restart_timer;
5828
5829         tg3_full_lock(tp, 0);
5830
5831         if (!netif_running(tp->dev)) {
5832                 tg3_full_unlock(tp);
5833                 return;
5834         }
5835
5836         tg3_full_unlock(tp);
5837
5838         tg3_phy_stop(tp);
5839
5840         tg3_netif_stop(tp);
5841
5842         tg3_full_lock(tp, 1);
5843
5844         restart_timer = tg3_flag(tp, RESTART_TIMER);
5845         tg3_flag_clear(tp, RESTART_TIMER);
5846
5847         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5848                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5849                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5850                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5851                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5852         }
5853
5854         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5855         err = tg3_init_hw(tp, 1);
5856         if (err)
5857                 goto out;
5858
5859         tg3_netif_start(tp);
5860
5861         if (restart_timer)
5862                 mod_timer(&tp->timer, jiffies + 1);
5863
5864 out:
5865         tg3_full_unlock(tp);
5866
5867         if (!err)