Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm...
[linux-2.6.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Return nonzero if @flag is set in the @bits bitmap (tp->tg3_flags).
 * Called through the tg3_flag() macro so the flag name is checked
 * against enum TG3_FLAGS at compile time.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
71
/* Set @flag in the @bits bitmap; used via the tg3_flag_set() macro. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
76
/* Clear @flag in the @bits bitmap; used via the tg3_flag_clear() macro. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
81
/* Flag accessors: paste the TG3_FLAG_ prefix onto @flag so callers can
 * write tg3_flag(tp, ENABLE_APE) etc.; routing through the _tg3_flag*()
 * helpers type-checks the name against enum TG3_FLAGS.
 */
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     123
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "March 21, 2012"
95
96 #define RESET_KIND_SHUTDOWN     0
97 #define RESET_KIND_INIT         1
98 #define RESET_KIND_SUSPEND      2
99
100 #define TG3_DEF_RX_MODE         0
101 #define TG3_DEF_TX_MODE         0
102 #define TG3_DEF_MSG_ENABLE        \
103         (NETIF_MSG_DRV          | \
104          NETIF_MSG_PROBE        | \
105          NETIF_MSG_LINK         | \
106          NETIF_MSG_TIMER        | \
107          NETIF_MSG_IFDOWN       | \
108          NETIF_MSG_IFUP         | \
109          NETIF_MSG_RX_ERR       | \
110          NETIF_MSG_TX_ERR)
111
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
113
114 /* length of time before we decide the hardware is borked,
115  * and dev->tx_timeout() should be called to fix the problem
116  */
117
118 #define TG3_TX_TIMEOUT                  (5 * HZ)
119
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU                     60
122 #define TG3_MAX_MTU(tp) \
123         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
124
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126  * You can't change the ring sizes, but you can change where you place
127  * them in the NIC onboard memory.
128  */
129 #define TG3_RX_STD_RING_SIZE(tp) \
130         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING         200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
137
138 /* Do not place this n-ring entries value into the tp struct itself,
139  * we really want to expose these constants to GCC so that modulo et
140  * al.  operations are done with shifts and masks instead of with
141  * hw multiply/modulo instructions.  Another solution would be to
142  * replace things like '% foo' with '& (foo - 1)'.
143  */
144
145 #define TG3_TX_RING_SIZE                512
146 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
147
148 #define TG3_RX_STD_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
155                                  TG3_TX_RING_SIZE)
156 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
157
158 #define TG3_DMA_BYTE_ENAB               64
159
160 #define TG3_RX_STD_DMA_SZ               1536
161 #define TG3_RX_JMB_DMA_SZ               9046
162
163 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
164
165 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
167
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
170
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
173
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175  * that are at least dword aligned when used in PCIX mode.  The driver
176  * works around this bug by double copying the packet.  This workaround
177  * is built into the normal double copy length check for efficiency.
178  *
179  * However, the double copy is only necessary on those architectures
180  * where unaligned memory accesses are inefficient.  For those architectures
181  * where unaligned memory accesses incur little penalty, we can reintegrate
182  * the 5701 in the normal rx path.  Doing so saves a device structure
183  * dereference by hardcoding the double copy threshold in place.
184  */
185 #define TG3_RX_COPY_THRESHOLD           256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
188 #else
189         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
190 #endif
191
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
194 #else
195 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
196 #endif
197
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K            2048
201 #define TG3_TX_BD_DMA_MAX_4K            4096
202
203 #define TG3_RAW_IP_ALIGN 2
204
205 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
206 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Driver version banner string, e.g. "tg3.c:v3.123 (March 21, 2012)". */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
/* Module metadata exposed through modinfo, plus the firmware blobs the
 * driver may request at runtime.
 */
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
/* Bitmap of NETIF_MSG_* message categories to enable (module parameter). */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs this driver claims; the table is terminated by
 * an all-zero sentinel entry.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Statistic names reported by "ethtool -S".
 * NOTE(review): the ordering appears to mirror the driver's hardware
 * statistics layout — verify against the stats-gathering code before
 * inserting or reordering entries.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Self-test names reported by "ethtool -t"; online tests can run while
 * the interface is up, offline tests disrupt normal operation.
 * NOTE(review): ordering appears tied to the self-test dispatch code —
 * keep in sync when editing.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Direct MMIO write of @val to register offset @off (tp->regs window). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
420
/* Direct MMIO read of register offset @off (tp->regs window). */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
425
/* MMIO write to the APE register window (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
430
/* MMIO read from the APE register window (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
435
/* Indirect register write through PCI config space: program the target
 * offset into REG_BASE_ADDR, then write the data to REG_DATA.
 * indirect_lock serializes the two-step sequence against other users
 * of the config window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
451
/* Indirect register read through PCI config space; counterpart of
 * tg3_write_indirect_reg32().  indirect_lock keeps the base-address
 * programming and the data read atomic with respect to other users.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
463
/* Mailbox write for chips that must reach mailboxes indirectly through
 * PCI config space.  Two mailboxes have dedicated config-space shadow
 * registers and take a fast path; everything else goes through the
 * generic REG_BASE_ADDR/REG_DATA window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* Mailboxes sit at a 0x5600 bias in the indirect register space;
	 * tg3_read_indirect_mbox() applies the same bias.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
/* Mailbox read via the indirect config-space window; applies the same
 * 0x5600 mailbox bias as tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* Write @val to @off and flush it, optionally waiting @usec_wait usecs.
 * Buggy chips (PCIX_TARGET_HWBUG/ICH_WORKAROUND) use the non-posted
 * indirect write path and must not be read back; everyone else posts
 * the write and flushes it with a read.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
529
/* Mailbox write followed by a read-back flush, except on chips where
 * the mailbox read is skipped (MBOX_WRITE_REORDER handles ordering via
 * tg3_write32_tx_mbox; ICH_WORKAROUND must avoid the read entirely).
 * NOTE(review): the exact reason each flag skips the read is not
 * visible here — confirm against the chip errata before changing.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
536
/* TX mailbox write with two hardware workarounds: TXD_MBOX_HWBUG chips
 * need the value written twice, and MBOX_WRITE_REORDER chips need a
 * read-back so the write is not reordered against later MMIO.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
546
/* 5906 mailbox read: mailboxes are reached at a GRCMBOX_BASE bias. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906 mailbox write; counterpart of tg3_read32_mbox_5906(). */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-board SRAM at offset @off.  Depending on
 * SRAM_USE_CONFIG, the memory window is driven either through PCI
 * config space or through MMIO; both paths are serialized by
 * indirect_lock and leave the window base at zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* On 5906, writes to the statistics block region of SRAM are
	 * silently dropped.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read a 32-bit word of NIC on-board SRAM at offset @off into *@val.
 * Mirrors tg3_write_mem(): uses the config-space or MMIO memory window
 * under indirect_lock and restores the window base to zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* On 5906, reads from the statistics block region of SRAM are
	 * not performed; return 0 instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire an APE (Application Processing Engine) hardware lock.
 * Returns 0 on success (or when APE is disabled / the 5761 GPIO lock
 * is requested), -EINVAL for an unsupported lock number, or -EBUSY if
 * the lock is not granted within roughly one millisecond.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock to take. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests with the driver bit; other PCI
		 * functions request with their function-number bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* The 5761 uses a different request/grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
702
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705         u32 gnt, bit;
706
707         if (!tg3_flag(tp, ENABLE_APE))
708                 return;
709
710         switch (locknum) {
711         case TG3_APE_LOCK_GPIO:
712                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713                         return;
714         case TG3_APE_LOCK_GRC:
715         case TG3_APE_LOCK_MEM:
716                 if (!tp->pci_fn)
717                         bit = APE_LOCK_GRANT_DRIVER;
718                 else
719                         bit = 1 << tp->pci_fn;
720                 break;
721         default:
722                 return;
723         }
724
725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726                 gnt = TG3_APE_LOCK_GRANT;
727         else
728                 gnt = TG3_APE_PER_LOCK_GRANT;
729
730         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732
/* Post @event to the APE firmware.  Silently returns if the firmware
 * is NCSI (no event support), its signature is missing, or it is not
 * ready.  Queuing is retried for up to ~1ms while a previous event is
 * still pending.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only queue our event once no previous one is pending;
		 * the event status is updated under the MEM lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if the event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Inform the APE management firmware of a driver state transition
 * (@kind is one of RESET_KIND_INIT/SHUTDOWN/SUSPEND), updating the
 * host segment registers and posting a matching state-change event.
 * No-op when the APE is not enabled or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment and count this init so the APE
		 * knows a live driver is attached.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* If Wake-on-LAN is armed, tell the APE so it can keep the
		 * link alive at an appropriate speed while the host sleeps.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
829
/* Mask PCI interrupts at the chip level, then write 1 to every
 * per-vector interrupt mailbox.  Note this walks irq_max (all possible
 * vectors), not just the currently active irq_cnt.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Unmask chip-level PCI interrupts, re-arm every active vector's
 * interrupt mailbox with its last tag, and force an initial interrupt
 * (or coalescing pass) so a status update that arrived while interrupts
 * were masked is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order irq_sync clear before the unmasking writes below */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode writes the mailbox a second time; the
		 * duplicate write is deliberate.  NOTE(review): presumably a
		 * chip-specific re-arm requirement -- confirm against errata.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector with the last processed tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* order the mailbox write vs. other CPUs' MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Step the chip's core clock selection back to the default, preserving
 * the CLKRUN-related bits.  5705+ parts drop the 625MHz core select in
 * one write; older parts leave 44MHz core via an intermediate ALTCLK
 * state.  Not applicable to CPMU or 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low clock divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHz+ALTCLK first, then ALTCLK only. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
/* Number of 10us polls of MAC_MI_COM before an MDIO transaction times out. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg through the MAC's MI (MDIO) interface.
 * MI auto-polling is paused for the duration if it was enabled.
 * On success *@val holds the register value and 0 is returned;
 * -EBUSY is returned if the transaction does not complete in time
 * (in which case *@val is left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read opcode. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears, then re-read to latch the data. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore MI auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
994
/* Write @val to PHY register @reg through the MAC's MI (MDIO) interface.
 * MI auto-polling is paused for the duration if it was enabled.
 * Returns 0 on success, -EBUSY on transaction timeout.  Writes to
 * MII_CTRL1000/MII_TG3_AUX_CTRL on FET-style PHYs are silently dropped
 * (those registers do not apply there).
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY addr, register, data, write op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore MI auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable the PHY's SMDSP block (keeping the TX 6dB coding setting) via
 * the auxctl shadow register.  Expands to the tg3_phy_auxctl_write()
 * call, so the result can be checked by the caller.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable the SMDSP block again.  The stray trailing semicolon the
 * macro used to carry has been dropped so it can be used in expression
 * context, like its ENABLE counterpart.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Issue a PHY soft reset by setting BMCR_RESET and polling until the
 * self-clearing bit drops (up to 5000 * ~10us).  Returns 0 on success,
 * -EBUSY on an MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit went to -1 only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1172
/* mii_bus ->read hook: read PHY register @reg under tp->lock.
 * Returns the register value, or -EIO on MDIO failure.  @val must be
 * u32 to match tg3_readphy()'s out-parameter; storing -EIO in it and
 * returning it as int round-trips the error code intact.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* mii_bus ->reset hook: nothing to do for tg3; the core requires the
 * callback, so just report success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1207
/* Program the 5785's MAC-side PHY configuration (LED modes, RGMII
 * in-band/out-of-band signalling, clock timeouts) to match the
 * attached PHY model and interface mode.  Unknown PHYs are left alone.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED mode configuration for the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only LED modes plus clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status: enable the full mask/enable set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band settings into the extended RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Disable MI auto-polling so the driver owns the MDIO interface, and
 * reapply the 5785 PHY configuration if the mdio bus is already up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1299
/* Determine the PHY address, and (when phylib is in use) allocate,
 * populate and register the mdio bus, then apply per-PHY quirks.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHYs per PCI function; serdes PHYs sit
		 * 7 addresses above the copper ones.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the single address our PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply interface mode and workaround flags per PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
/* tp->lock is held.
 * Raise the driver-event bit in GRC_RX_CPU_EVENT to signal the RX CPU
 * firmware, and record when we did so (tg3_wait_for_event_ack() uses
 * last_event_jiffies to bound its wait).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1425
/* Upper bound (in us) to wait for firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held.
 * Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC since the last event was
 * generated) for the firmware to clear the driver-event bit, i.e. to
 * acknowledge the previous event.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1454
/* tp->lock is held.
 * Snapshot the link-negotiation PHY registers into data[0..3] for the
 * firmware link report: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000
 * (skipped on MII-serdes PHYs) and PHYADDR.  Registers that fail to
 * read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1489
/* tp->lock is held.
 * Send a link-status update to the management firmware (5780-class
 * parts with ASF only): gather the PHY snapshot, wait for the previous
 * event to be acked, write the command mailboxes, and signal the CPU.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* 14 = command payload length expected by the firmware. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
1511
/* tp->lock is held.
 * Ask the ASF firmware to pause (non-APE configurations only), waiting
 * for the previous event ack before sending and for the pause command's
 * ack afterwards.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1527
/* tp->lock is held.
 * Before a chip reset: write the firmware mailbox magic, record the
 * reset kind in the driver-state mailbox (new-handshake ASF only), and
 * notify the APE for init/suspend transitions.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	/* Shutdown notifies the APE from the post-reset path instead. */
	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1560
/* tp->lock is held.
 * After a chip reset: record the completed transition in the
 * driver-state mailbox (new-handshake ASF only) and notify the APE on
 * shutdown.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1584
/* tp->lock is held.
 * Legacy (pre-new-handshake) ASF signalling: just record the reset
 * kind in the driver-state mailbox.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1610
/* Wait for on-chip firmware to finish booting after a reset.  5906
 * parts poll the VCPU status (up to 20ms, -ENODEV on timeout); others
 * poll the firmware mailbox for the inverted magic (up to ~1s).  A
 * mailbox timeout is NOT an error -- some boards ship without firmware
 * -- but it is logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1654
/* Log the current link state (speed/duplex/flow-control/EEE) to the
 * kernel log and forward a link update to the management firmware.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1682
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685         u16 miireg;
1686
1687         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688                 miireg = ADVERTISE_1000XPAUSE;
1689         else if (flow_ctrl & FLOW_CTRL_TX)
1690                 miireg = ADVERTISE_1000XPSE_ASYM;
1691         else if (flow_ctrl & FLOW_CTRL_RX)
1692                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693         else
1694                 miireg = 0;
1695
1696         return miireg;
1697 }
1698
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701         u8 cap = 0;
1702
1703         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706                 if (lcladv & ADVERTISE_1000XPAUSE)
1707                         cap = FLOW_CTRL_RX;
1708                 if (rmtadv & ADVERTISE_1000XPAUSE)
1709                         cap = FLOW_CTRL_TX;
1710         }
1711
1712         return cap;
1713 }
1714
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717         u8 autoneg;
1718         u8 flowctrl = 0;
1719         u32 old_rx_mode = tp->rx_mode;
1720         u32 old_tx_mode = tp->tx_mode;
1721
1722         if (tg3_flag(tp, USE_PHYLIB))
1723                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724         else
1725                 autoneg = tp->link_config.autoneg;
1726
1727         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730                 else
1731                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732         } else
1733                 flowctrl = tp->link_config.flowctrl;
1734
1735         tp->link_config.active_flowctrl = flowctrl;
1736
1737         if (flowctrl & FLOW_CTRL_RX)
1738                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739         else
1740                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741
1742         if (old_rx_mode != tp->rx_mode)
1743                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1744
1745         if (flowctrl & FLOW_CTRL_TX)
1746                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747         else
1748                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749
1750         if (old_tx_mode != tp->tx_mode)
1751                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753
/* phylib link-change callback: translate the PHY's reported state into
 * MAC_MODE / MAC_MI_STAT / MAC_TX_LENGTHS register settings and report
 * any change.  Register updates run under tp->lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Recompute the port-mode and duplex bits from scratch. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's reported pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the hardware when the mode actually changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit gets a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Detect whether anything user-visible changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1837
1838 static int tg3_phy_init(struct tg3 *tp)
1839 {
1840         struct phy_device *phydev;
1841
1842         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1843                 return 0;
1844
1845         /* Bring the PHY back to a known state. */
1846         tg3_bmcr_reset(tp);
1847
1848         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1849
1850         /* Attach the MAC to the PHY. */
1851         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1852                              phydev->dev_flags, phydev->interface);
1853         if (IS_ERR(phydev)) {
1854                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1855                 return PTR_ERR(phydev);
1856         }
1857
1858         /* Mask with MAC supported features. */
1859         switch (phydev->interface) {
1860         case PHY_INTERFACE_MODE_GMII:
1861         case PHY_INTERFACE_MODE_RGMII:
1862                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1863                         phydev->supported &= (PHY_GBIT_FEATURES |
1864                                               SUPPORTED_Pause |
1865                                               SUPPORTED_Asym_Pause);
1866                         break;
1867                 }
1868                 /* fallthru */
1869         case PHY_INTERFACE_MODE_MII:
1870                 phydev->supported &= (PHY_BASIC_FEATURES |
1871                                       SUPPORTED_Pause |
1872                                       SUPPORTED_Asym_Pause);
1873                 break;
1874         default:
1875                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1876                 return -EINVAL;
1877         }
1878
1879         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1880
1881         phydev->advertising = phydev->supported;
1882
1883         return 0;
1884 }
1885
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888         struct phy_device *phydev;
1889
1890         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891                 return;
1892
1893         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894
1895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897                 phydev->speed = tp->link_config.speed;
1898                 phydev->duplex = tp->link_config.duplex;
1899                 phydev->autoneg = tp->link_config.autoneg;
1900                 phydev->advertising = tp->link_config.advertising;
1901         }
1902
1903         phy_start(phydev);
1904
1905         phy_start_aneg(phydev);
1906 }
1907
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911                 return;
1912
1913         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921         }
1922 }
1923
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926         int err;
1927         u32 val;
1928
1929         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930                 return 0;
1931
1932         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933                 /* Cannot do read-modify-write on 5401 */
1934                 err = tg3_phy_auxctl_write(tp,
1935                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937                                            0x4c20);
1938                 goto done;
1939         }
1940
1941         err = tg3_phy_auxctl_read(tp,
1942                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943         if (err)
1944                 return err;
1945
1946         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947         err = tg3_phy_auxctl_write(tp,
1948                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950 done:
1951         return err;
1952 }
1953
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956         u32 phytest;
1957
1958         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959                 u32 phy;
1960
1961                 tg3_writephy(tp, MII_TG3_FET_TEST,
1962                              phytest | MII_TG3_FET_SHADOW_EN);
1963                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964                         if (enable)
1965                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966                         else
1967                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969                 }
1970                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971         }
1972 }
1973
1974 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975 {
1976         u32 reg;
1977
1978         if (!tg3_flag(tp, 5705_PLUS) ||
1979             (tg3_flag(tp, 5717_PLUS) &&
1980              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1981                 return;
1982
1983         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1984                 tg3_phy_fet_toggle_apd(tp, enable);
1985                 return;
1986         }
1987
1988         reg = MII_TG3_MISC_SHDW_WREN |
1989               MII_TG3_MISC_SHDW_SCR5_SEL |
1990               MII_TG3_MISC_SHDW_SCR5_LPED |
1991               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992               MII_TG3_MISC_SHDW_SCR5_SDTL |
1993               MII_TG3_MISC_SHDW_SCR5_C125OE;
1994         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996
1997         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_APD_SEL |
2002               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003         if (enable)
2004                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005
2006         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007 }
2008
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010 {
2011         u32 phy;
2012
2013         if (!tg3_flag(tp, 5705_PLUS) ||
2014             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015                 return;
2016
2017         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018                 u32 ephy;
2019
2020                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022
2023                         tg3_writephy(tp, MII_TG3_FET_TEST,
2024                                      ephy | MII_TG3_FET_SHADOW_EN);
2025                         if (!tg3_readphy(tp, reg, &phy)) {
2026                                 if (enable)
2027                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028                                 else
2029                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030                                 tg3_writephy(tp, reg, phy);
2031                         }
2032                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2033                 }
2034         } else {
2035                 int ret;
2036
2037                 ret = tg3_phy_auxctl_read(tp,
2038                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039                 if (!ret) {
2040                         if (enable)
2041                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042                         else
2043                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044                         tg3_phy_auxctl_write(tp,
2045                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2046                 }
2047         }
2048 }
2049
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052         int ret;
2053         u32 val;
2054
2055         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056                 return;
2057
2058         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059         if (!ret)
2060                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2065 {
2066         u32 otp, phy;
2067
2068         if (!tp->phy_otp)
2069                 return;
2070
2071         otp = tp->phy_otp;
2072
2073         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074                 return;
2075
2076         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079
2080         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083
2084         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087
2088         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090
2091         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093
2094         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097
2098         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 }
2100
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102 {
2103         u32 val;
2104
2105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106                 return;
2107
2108         tp->setlpicnt = 0;
2109
2110         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111             current_link_up == 1 &&
2112             tp->link_config.active_duplex == DUPLEX_FULL &&
2113             (tp->link_config.active_speed == SPEED_100 ||
2114              tp->link_config.active_speed == SPEED_1000)) {
2115                 u32 eeectl;
2116
2117                 if (tp->link_config.active_speed == SPEED_1000)
2118                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119                 else
2120                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121
2122                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123
2124                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125                                   TG3_CL45_D7_EEERES_STAT, &val);
2126
2127                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129                         tp->setlpicnt = 2;
2130         }
2131
2132         if (!tp->setlpicnt) {
2133                 if (current_link_up == 1 &&
2134                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137                 }
2138
2139                 val = tr32(TG3_CPMU_EEE_MODE);
2140                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141         }
2142 }
2143
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146         u32 val;
2147
2148         if (tp->link_config.active_speed == SPEED_1000 &&
2149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151              tg3_flag(tp, 57765_CLASS)) &&
2152             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153                 val = MII_TG3_DSP_TAP26_ALNOKO |
2154                       MII_TG3_DSP_TAP26_RMRXSTO;
2155                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157         }
2158
2159         val = tr32(TG3_CPMU_EEE_MODE);
2160         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165         int limit = 100;
2166
2167         while (limit--) {
2168                 u32 tmp32;
2169
2170                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171                         if ((tmp32 & 0x1000) == 0)
2172                                 break;
2173                 }
2174         }
2175         if (limit < 0)
2176                 return -EBUSY;
2177
2178         return 0;
2179 }
2180
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back to verify DSP memory access works.  On any macro timeout
 * or compare mismatch, request another PHY reset via *resetp and return
 * -EBUSY; returns 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel six-word patterns; words alternate low/high halves. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block and select write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		/* Load the six-word test pattern. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the macro and wait for it to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the block and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare low/high word pairs, masked to the
		 * bits the hardware actually stores.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the fixed DSP writes used
				 * by the vendor sequence before bailing out
				 * (exact register semantics undocumented).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2246
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249         int chan;
2250
2251         for (chan = 0; chan < 4; chan++) {
2252                 int i;
2253
2254                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255                              (chan * 0x2000) | 0x0200);
2256                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257                 for (i = 0; i < 6; i++)
2258                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260                 if (tg3_wait_macro_done(tp))
2261                         return -EBUSY;
2262         }
2263
2264         return 0;
2265 }
2266
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268 {
2269         u32 reg32, phy9_orig;
2270         int retries, do_phy_reset, err;
2271
2272         retries = 10;
2273         do_phy_reset = 1;
2274         do {
2275                 if (do_phy_reset) {
2276                         err = tg3_bmcr_reset(tp);
2277                         if (err)
2278                                 return err;
2279                         do_phy_reset = 0;
2280                 }
2281
2282                 /* Disable transmitter and interrupt.  */
2283                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284                         continue;
2285
2286                 reg32 |= 0x3000;
2287                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288
2289                 /* Set full-duplex, 1000 mbps.  */
2290                 tg3_writephy(tp, MII_BMCR,
2291                              BMCR_FULLDPLX | BMCR_SPEED1000);
2292
2293                 /* Set to master mode.  */
2294                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295                         continue;
2296
2297                 tg3_writephy(tp, MII_CTRL1000,
2298                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2299
2300                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301                 if (err)
2302                         return err;
2303
2304                 /* Block the PHY control access.  */
2305                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2306
2307                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308                 if (!err)
2309                         break;
2310         } while (--retries);
2311
2312         err = tg3_phy_reset_chanpat(tp);
2313         if (err)
2314                 return err;
2315
2316         tg3_phydsp_write(tp, 0x8005, 0x0000);
2317
2318         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2320
2321         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2322
2323         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2324
2325         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326                 reg32 &= ~0x3000;
2327                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328         } else if (!err)
2329                 err = -EBUSY;
2330
2331         return err;
2332 }
2333
2334 /* This will reset the tigon3 PHY if there is no valid
2335  * link unless the FORCE argument is non-zero.
2336  */
2337 static int tg3_phy_reset(struct tg3 *tp)
2338 {
2339         u32 val, cpmuctrl;
2340         int err;
2341
2342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343                 val = tr32(GRC_MISC_CFG);
2344                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345                 udelay(40);
2346         }
2347         err  = tg3_readphy(tp, MII_BMSR, &val);
2348         err |= tg3_readphy(tp, MII_BMSR, &val);
2349         if (err != 0)
2350                 return -EBUSY;
2351
2352         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353                 netif_carrier_off(tp->dev);
2354                 tg3_link_report(tp);
2355         }
2356
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360                 err = tg3_phy_reset_5703_4_5(tp);
2361                 if (err)
2362                         return err;
2363                 goto out;
2364         }
2365
2366         cpmuctrl = 0;
2367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371                         tw32(TG3_CPMU_CTRL,
2372                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373         }
2374
2375         err = tg3_bmcr_reset(tp);
2376         if (err)
2377                 return err;
2378
2379         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2382
2383                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2384         }
2385
2386         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2391                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392                         udelay(40);
2393                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394                 }
2395         }
2396
2397         if (tg3_flag(tp, 5717_PLUS) &&
2398             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399                 return 0;
2400
2401         tg3_phy_apply_otp(tp);
2402
2403         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404                 tg3_phy_toggle_apd(tp, true);
2405         else
2406                 tg3_phy_toggle_apd(tp, false);
2407
2408 out:
2409         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2413                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2414         }
2415
2416         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419         }
2420
2421         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2424                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2425                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427                 }
2428         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433                                 tg3_writephy(tp, MII_TG3_TEST1,
2434                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2435                         } else
2436                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         }
2441
2442         /* Set Extended packet length bit (bit 14) on all chips that */
2443         /* support jumbo frames */
2444         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445                 /* Cannot do read-modify-write on 5401 */
2446                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448                 /* Set bit 14 with read-modify-write to preserve other bits */
2449                 err = tg3_phy_auxctl_read(tp,
2450                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451                 if (!err)
2452                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2454         }
2455
2456         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2457          * jumbo frames transmission.
2458          */
2459         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2463         }
2464
2465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466                 /* adjust output voltage */
2467                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2468         }
2469
2470         tg3_phy_toggle_automdix(tp, 1);
2471         tg3_phy_set_wirespeed(tp);
2472         return 0;
2473 }
2474
/* Per-function GPIO handshake messages.  Each PCI function owns a
 * 4-bit nibble in a shared status register (APE scratchpad on
 * 5717/5719, TG3_CPMU_DRV_STATUS elsewhere); see
 * tg3_set_function_status() below for how the nibbles are packed.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)

/* Masks covering the corresponding bit in all four function nibbles. */
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493         u32 status, shift;
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498         else
2499                 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502         status &= ~(TG3_GPIO_MSG_MASK << shift);
2503         status |= (newstat << shift);
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508         else
2509                 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511         return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
2513
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516         if (!tg3_flag(tp, IS_NIC))
2517                 return 0;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523                         return -EIO;
2524
2525                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531         } else {
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541         u32 grc_local_ctrl;
2542
2543         if (!tg3_flag(tp, IS_NIC) ||
2544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546                 return;
2547
2548         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550         tw32_wait_f(GRC_LOCAL_CTRL,
2551                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554         tw32_wait_f(GRC_LOCAL_CTRL,
2555                     grc_local_ctrl,
2556                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558         tw32_wait_f(GRC_LOCAL_CTRL,
2559                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562
/* Switch the board's power source to the auxiliary (Vaux) supply.
 * The GPIO sequencing is chip-family specific; each write is followed
 * by TG3_GRC_LCLCTL_PWRSW_DELAY so the external switch can settle.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write enabling GPIO0-2 as outputs
		 * with GPIO0 and GPIO1 driven high.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 last, after the other lines are stable. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			/* Drop GPIO2 again once the switch has occurred. */
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2639
/* Select the power source on 5717-class devices, coordinating with
 * the other PCI functions through the shared GPIO message register.
 * wol_enable indicates whether Wake-on-LAN should be powered.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	/* This function needs Vaux if management firmware or WoL is on. */
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* If any other function still reports a driver present, leave
	 * the power source alone and let that driver manage it.
	 */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2664
/* Choose between Vmain and Vaux for this NIC, taking the peer device
 * on dual-port boards into account.  include_wol selects whether
 * Wake-on-LAN requirements count toward needing Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* 5717-class parts coordinate through the APE message
		 * register instead of peering at the other netdev.
		 */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer manages the switch. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2708
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712                 return 1;
2713         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714                 if (speed != SPEED_10)
2715                         return 1;
2716         } else if (speed == SPEED_10)
2717                 return 1;
2718
2719         return 0;
2720 }
2721
/* Power down the PHY in preparation for a low-power device state.
 * do_low_power requests the deeper isolate/low-power PHY settings
 * on copper PHYs that support them.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		/* SerDes PHYs need no MII-level power down. */
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the internal ephy in IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enter shadow-register mode, set the standby
			 * power-down bit in auxmode 4, then restore the
			 * test register to leave shadow mode.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Slow the MAC clock to 12.5MHz before powering down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2794
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore.  The lock is
 * reference counted; only the first acquisition touches hardware.
 * Returns 0 on success, -ENODEV if arbitration is never granted.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			/* Poll for the grant: 8000 * 20us = 160ms max. */
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out; withdraw our request. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2817
2818 /* tp->lock is held. */
2819 static void tg3_nvram_unlock(struct tg3 *tp)
2820 {
2821         if (tg3_flag(tp, NVRAM)) {
2822                 if (tp->nvram_lock_cnt > 0)
2823                         tp->nvram_lock_cnt--;
2824                 if (tp->nvram_lock_cnt == 0)
2825                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_enable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_disable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2846         }
2847 }
2848
/* Read one 32-bit word through the legacy SEEPROM interface.
 * offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the non-address control bits, then start a read. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, at least 1ms per iteration. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2888
2889 #define NVRAM_CMD_TIMEOUT 10000
2890
/* Issue an NVRAM controller command and poll for NVRAM_CMD_DONE.
 * Returns 0 on success or -EBUSY if the command never completes.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra settling delay after completion. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2909
2910 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2911 {
2912         if (tg3_flag(tp, NVRAM) &&
2913             tg3_flag(tp, NVRAM_BUFFERED) &&
2914             tg3_flag(tp, FLASH) &&
2915             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2916             (tp->nvram_jedecnum == JEDEC_ATMEL))
2917
2918                 addr = ((addr / tp->nvram_pagesize) <<
2919                         ATMEL_AT45DB0X1B_PAGE_POS) +
2920                        (addr % tp->nvram_pagesize);
2921
2922         return addr;
2923 }
2924
2925 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2926 {
2927         if (tg3_flag(tp, NVRAM) &&
2928             tg3_flag(tp, NVRAM_BUFFERED) &&
2929             tg3_flag(tp, FLASH) &&
2930             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2931             (tp->nvram_jedecnum == JEDEC_ATMEL))
2932
2933                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934                         tp->nvram_pagesize) +
2935                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2936
2937         return addr;
2938 }
2939
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Returns 0 on success or a negative errno (bad offset, lock
 * failure, or command timeout).
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* No NVRAM controller: fall back to the SEEPROM interface. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Only trust the data register after a successful command. */
	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2977
2978 /* Ensures NVRAM data is in bytestream format. */
2979 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2980 {
2981         u32 v;
2982         int res = tg3_nvram_read(tp, offset, &v);
2983         if (!res)
2984                 *val = cpu_to_be32(v);
2985         return res;
2986 }
2987
/* Write a block through the legacy SEEPROM interface, one dword at a
 * time, polling for completion after each word.  offset and len are
 * dword aligned.  Returns 0 on success, -EBUSY on a write timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale completion status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, at least 1ms per iteration. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3036
/* offset and length are dword aligned */
/* Write to unbuffered flash: each affected page must be read back,
 * merged with the new data, erased, and rewritten in full.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): buf is never advanced between loop iterations, so a
 * write spanning more than one flash page would repeat the same
 * source bytes — verify against callers (writes may never span
 * pages in practice).
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the entire current page into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back, one dword at a time,
		 * marking the first and last words for the controller.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part with writes disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3135
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM one dword at a time; the
 * controller handles page buffering, so no erase cycle is needed.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page boundaries and the end of the transfer so
		 * the controller can manage its page buffer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older chips with ST flash need an explicit write
		 * enable at the start of each page.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3190
3191 /* offset and length are dword aligned */
3192 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3193 {
3194         int ret;
3195
3196         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3197                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3198                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3199                 udelay(40);
3200         }
3201
3202         if (!tg3_flag(tp, NVRAM)) {
3203                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3204         } else {
3205                 u32 grc_mode;
3206
3207                 ret = tg3_nvram_lock(tp);
3208                 if (ret)
3209                         return ret;
3210
3211                 tg3_enable_nvram_access(tp);
3212                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3213                         tw32(NVRAM_WRITE1, 0x406);
3214
3215                 grc_mode = tr32(GRC_MODE);
3216                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3217
3218                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3219                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3220                                 buf);
3221                 } else {
3222                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3223                                 buf);
3224                 }
3225
3226                 grc_mode = tr32(GRC_MODE);
3227                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3228
3229                 tg3_disable_nvram_access(tp);
3230                 tg3_nvram_unlock(tp);
3231         }
3232
3233         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3234                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3235                 udelay(40);
3236         }
3237
3238         return ret;
3239 }
3240
/* On-chip scratch memory regions that hold the RX/TX CPU firmware
 * images loaded by tg3_load_firmware_cpu().
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3245
/* tp->lock is held.
 *
 * Halt the on-chip RX or TX CPU (offset is RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ chips have no separate TX CPU. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 halts its VCPU via GRC_VCPU_EXT_CTRL instead. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Final forced halt with a posted-write flush. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3290
/* Describes one firmware image to be copied into an on-chip CPU's
 * scratch memory by tg3_load_firmware_cpu().
 */
struct fw_info {
	unsigned int fw_base;	/* firmware start address; low 16 bits are
				 * used as the offset into scratch memory */
	unsigned int fw_len;	/* image length in bytes */
	const __be32 *fw_data;	/* big-endian firmware words */
};
3296
/* tp->lock is held.
 *
 * Copy a firmware image into a CPU's scratch memory: halt the CPU
 * (taking the NVRAM lock first so bootcode is not cut off mid-access),
 * zero the scratch area, then write the image word by word.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ chips use direct memory writes; older chips must go
	 * through the indirect register interface.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, copy the image. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3342
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX cpu by pointing its PC at the image base.
 * Returns 0 on success, -ENODEV if the CPU refuses to take the new PC.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry a few times: re-halt and re-write the PC until it sticks. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Clear HALT to let the RX cpu run. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3397
/* tp->lock is held. */
/* Load the software-TSO firmware and start the CPU that runs it.
 * No-op (returns 0) on chips with hardware TSO. Returns 0 on success,
 * -ENODEV if the CPU PC cannot be set, or an error from the loader.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware TSO parts do not need this firmware. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;	/* default; overridden below for TX cpu */
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	/* 5705 runs the TSO firmware on the RX cpu out of the MBUF pool;
	 * everything else uses the TX cpu and its scratch memory.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry a few times: re-halt and re-write the PC until it sticks. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Clear HALT to let the cpu run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3461
3462
3463 /* tp->lock is held. */
3464 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3465 {
3466         u32 addr_high, addr_low;
3467         int i;
3468
3469         addr_high = ((tp->dev->dev_addr[0] << 8) |
3470                      tp->dev->dev_addr[1]);
3471         addr_low = ((tp->dev->dev_addr[2] << 24) |
3472                     (tp->dev->dev_addr[3] << 16) |
3473                     (tp->dev->dev_addr[4] <<  8) |
3474                     (tp->dev->dev_addr[5] <<  0));
3475         for (i = 0; i < 4; i++) {
3476                 if (i == 1 && skip_mac_1)
3477                         continue;
3478                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3480         }
3481
3482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484                 for (i = 0; i < 12; i++) {
3485                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3487                 }
3488         }
3489
3490         addr_high = (tp->dev->dev_addr[0] +
3491                      tp->dev->dev_addr[1] +
3492                      tp->dev->dev_addr[2] +
3493                      tp->dev->dev_addr[3] +
3494                      tp->dev->dev_addr[4] +
3495                      tp->dev->dev_addr[5]) &
3496                 TX_BACKOFF_SEED_MASK;
3497         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3498 }
3499
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	/* Re-write the cached MISC_HOST_CTRL value into config space;
	 * a power transition may have clobbered it.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3509
3510 static int tg3_power_up(struct tg3 *tp)
3511 {
3512         int err;
3513
3514         tg3_enable_register_access(tp);
3515
3516         err = pci_set_power_state(tp->pdev, PCI_D0);
3517         if (!err) {
3518                 /* Switch out of Vaux if it is a NIC */
3519                 tg3_pwrsrc_switch_to_vmain(tp);
3520         } else {
3521                 netdev_err(tp->dev, "Transition to D0 failed\n");
3522         }
3523
3524         return err;
3525 }
3526
3527 static int tg3_setup_phy(struct tg3 *, int);
3528
/* Quiesce the chip for a power-down: mask PCI interrupts, drop the PHY
 * into low power (or configure it for WOL), gate the chip clocks where
 * the chip family allows it, and signal shutdown to the firmware.
 * Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupt delivery while powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link settings so they can be
			 * restored on power-up.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Advertise only slow speeds while suspended;
			 * add 100Mb modes only when WOL wants them.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Some Broadcom PHY families still need the legacy
			 * low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the bootcode to signal
		 * completion via the firmware mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			/* Put the copper PHY into its WOL power state. */
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver alive so wake packets are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: the applicable scheme depends on the chip family. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes, 40us apart each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing needs the link. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3766
/* Quiesce the chip, arm PCI wake if WOL is enabled, and enter D3hot. */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3774
3775 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3776 {
3777         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3778         case MII_TG3_AUX_STAT_10HALF:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_HALF;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_10FULL:
3784                 *speed = SPEED_10;
3785                 *duplex = DUPLEX_FULL;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100HALF:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_HALF;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_100FULL:
3794                 *speed = SPEED_100;
3795                 *duplex = DUPLEX_FULL;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000HALF:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_HALF;
3801                 break;
3802
3803         case MII_TG3_AUX_STAT_1000FULL:
3804                 *speed = SPEED_1000;
3805                 *duplex = DUPLEX_FULL;
3806                 break;
3807
3808         default:
3809                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3810                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3811                                  SPEED_10;
3812                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3813                                   DUPLEX_HALF;
3814                         break;
3815                 }
3816                 *speed = SPEED_UNKNOWN;
3817                 *duplex = DUPLEX_UNKNOWN;
3818                 break;
3819         }
3820 }
3821
/* Program the PHY's autoneg advertisement registers (10/100, gigabit,
 * and EEE) from ethtool-style advertise/flowctrl masks. Returns 0 on
 * success or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement: CSMA selector plus the
	 * requested speed/duplex and flow control bits.
	 */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 are forced to gigabit master mode. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while the EEE advertisement is changed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;	/* treat as "nothing advertised" below */

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always re-disable SMDSP access; keep the first error. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3894
/* Kick off copper link bring-up: either configure and restart autoneg
 * (also used in low-power mode with a reduced advertisement), or force
 * the configured speed/duplex into BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low power: advertise only 10Mb modes, plus
			 * 100Mb when WOL requires that speed.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* Forced mode: mirror the request into the active fields. */
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to
			 * ~15ms) for link-down before writing the new
			 * BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice -- presumably because
				 * the link bit is latched; second read gives
				 * current state. TODO confirm.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
3967
3968 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3969 {
3970         int err;
3971
3972         /* Turn off tap power management. */
3973         /* Set Extended packet length bit */
3974         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3975
3976         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3977         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3978         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3979         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3980         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3981
3982         udelay(40);
3983
3984         return err;
3985 }
3986
/* Check whether the PHY's advertisement registers still match the
 * driver's configured advertisement. Returns true when they agree;
 * *lcladv is filled with the current MII_ADVERTISE contents.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	/* Flow control bits only matter when running full duplex. */
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		/* 5701 A0/B0 force gigabit master mode (see
		 * tg3_phy_autoneg_cfg), so include those bits in the
		 * comparison on these revisions.
		 */
		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4030
4031 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4032 {
4033         u32 lpeth = 0;
4034
4035         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4036                 u32 val;
4037
4038                 if (tg3_readphy(tp, MII_STAT1000, &val))
4039                         return false;
4040
4041                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4042         }
4043
4044         if (tg3_readphy(tp, MII_LPA, rmtadv))
4045                 return false;
4046
4047         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4048         tp->link_config.rmt_adv = lpeth;
4049
4050         return true;
4051 }
4052
/* Bring up or re-evaluate the link on a copper PHY and program the MAC
 * to match the negotiated speed/duplex/flow-control, including several
 * chip- and PHY-specific workarounds.
 * @force_reset: nonzero forces a PHY reset before (re)negotiation.
 * Returns 0 on success or a negative error from 5401 PHY DSP init.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        /* Acknowledge any latched link/config-change status bits. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Suspend MI auto-polling while we talk to the PHY directly. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched-low: read twice so the
                 * second read reflects the current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        /* Poll up to ~10ms for the 5401 to report link. */
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 sometimes fails to link at 1000;
                         * reset and redo the DSP init once more.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        /* Unmask only the link-change interrupt, or mask everything
         * when interrupts are not used (FET PHYs have no IMASK reg).
         */
        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_UNKNOWN;
        current_duplex = DUPLEX_UNKNOWN;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                /* Bit 10 in MISCTEST presumably enables the coupling
                 * workaround; set it and renegotiate if it was clear.
                 */
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll (latched-low, so read twice) for link, up to ~4ms. */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for the aux status register to become nonzero
                 * so the speed/duplex decode below is meaningful.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Retry BMCR until a plausible value (neither 0 nor
                 * all-ones-minus-reset) is returned.
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Link is only "up" if autoneg ran and the PHY
                         * advertisement matches what we requested.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        /* Forced mode: require an exact match with the
                         * configured speed/duplex/flow-control.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        /* MDI-X status lives in a different register on
                         * FET-style PHYs.
                         */
                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                /* Kick off a fresh negotiation, then see if link came
                 * straight back (or we are in internal loopback).
                 */
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Program the MAC port mode to match the link speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        /* 5700 needs a speed-dependent link polarity fixup. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* Notify firmware of a 1000Mbps link on 5700 in PCI-X or
         * high-speed PCI mode via the firmware mailbox.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* Toggle PCIe CLKREQ: off at 10/100, on at 1000.
                 * NOTE(review): assumes CLKREQ_BUG is only set on PCIe
                 * devices, so pci_pcie_cap() is nonzero here — confirm.
                 */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) +
                                              PCI_EXP_LNKCTL, newlnkctl);
        }

        /* Propagate any link state change to the network stack. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
4344
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  The MR_* flags mirror
 * the IEEE 802.3 clause 37 management variables.
 */
struct tg3_fiber_aneginfo {
        int state;      /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;      /* MR_* control/status bits, see below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* Link partner advertisement, decoded from the received config word. */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters (incremented once per state-machine step). */
        unsigned long link_time, cur_time;

        /* Last sampled RX config word and how many consecutive steps
         * it has remained stable (ability match detection).
         */
        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        /* Last transmitted / received clause-37 config words.  Bit
         * layout below matches the MAC_RX_AUTO_NEG register encoding.
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4408
/* Execute one step of the software 1000BASE-X (clause 37 style)
 * autonegotiation state machine.  Called repeatedly from
 * fiber_autoneg() with roughly 1us between steps.
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while a settle
 * timer is armed, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First step: reset all bookkeeping. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the received config word and update the ability /
         * ack / idle match indicators for this step.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        /* Config word changed: restart stability count. */
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        /* Same word seen on consecutive samples. */
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word received: link partner is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word to restart. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Wait out the settle time before ability detect. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Start advertising our abilities (always FD, plus the
                 * configured pause bits).
                 */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Advance once the partner's config word is stable. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Echo the partner's abilities with ACK set. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked; make sure it acked the same
                         * word we matched on, else restart.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the link partner advertisement into MR_ flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008 presumably is the received toggle bit in the
                 * config word encoding — not named above; confirm.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next page requested but unsupported
                                 * on both sides fails the negotiation.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words; wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4660
4661 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4662 {
4663         int res = 0;
4664         struct tg3_fiber_aneginfo aninfo;
4665         int status = ANEG_FAILED;
4666         unsigned int tick;
4667         u32 tmp;
4668
4669         tw32_f(MAC_TX_AUTO_NEG, 0);
4670
4671         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4672         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4673         udelay(40);
4674
4675         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4676         udelay(40);
4677
4678         memset(&aninfo, 0, sizeof(aninfo));
4679         aninfo.flags |= MR_AN_ENABLE;
4680         aninfo.state = ANEG_STATE_UNKNOWN;
4681         aninfo.cur_time = 0;
4682         tick = 0;
4683         while (++tick < 195000) {
4684                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4685                 if (status == ANEG_DONE || status == ANEG_FAILED)
4686                         break;
4687
4688                 udelay(1);
4689         }
4690
4691         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4692         tw32_f(MAC_MODE, tp->mac_mode);
4693         udelay(40);
4694
4695         *txflags = aninfo.txconfig;
4696         *rxflags = aninfo.flags;
4697
4698         if (status == ANEG_DONE &&
4699             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4700                              MR_LP_ADV_FULL_DUPLEX)))
4701                 res = 1;
4702
4703         return res;
4704 }
4705
/* One-time initialization sequence for the BCM8002 SerDes PHY: PLL
 * setup, soft reset, channel configuration and a POR pulse.  The
 * register numbers/values are vendor magic with no public names —
 * the inline comments below come from the original sequence.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        /* Presumably toggles the block in/out of a low-power or reset
         * state around the POR pulse — vendor magic, confirm.
         */
        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
4755
4756 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4757 {
4758         u16 flowctrl;
4759         u32 sg_dig_ctrl, sg_dig_status;
4760         u32 serdes_cfg, expected_sg_dig_ctrl;
4761         int workaround, port_a;
4762         int current_link_up;
4763
4764         serdes_cfg = 0;
4765         expected_sg_dig_ctrl = 0;
4766         workaround = 0;
4767         port_a = 1;
4768         current_link_up = 0;
4769
4770         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4771             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4772                 workaround = 1;
4773                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4774                         port_a = 0;
4775
4776                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4777                 /* preserve bits 20-23 for voltage regulator */
4778                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4779         }
4780
4781         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4782
4783         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4784                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4785                         if (workaround) {
4786                                 u32 val = serdes_cfg;
4787
4788                                 if (port_a)
4789                                         val |= 0xc010000;
4790                                 else
4791                                         val |= 0x4010000;
4792                                 tw32_f(MAC_SERDES_CFG, val);
4793                         }
4794
4795                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4796                 }
4797                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4798                         tg3_setup_flow_control(tp, 0, 0);
4799                         current_link_up = 1;
4800                 }
4801                 goto out;
4802         }
4803
4804         /* Want auto-negotiation.  */
4805         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4806
4807         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4808         if (flowctrl & ADVERTISE_1000XPAUSE)
4809                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4810         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4811                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4812
4813         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4814                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4815                     tp->serdes_counter &&
4816                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4817                                     MAC_STATUS_RCVD_CFG)) ==
4818                      MAC_STATUS_PCS_SYNCED)) {
4819                         tp->serdes_counter--;
4820                         current_link_up = 1;
4821                         goto out;
4822                 }
4823 restart_autoneg:
4824                 if (workaround)
4825                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4826                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4827                 udelay(5);
4828                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4829
4830                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4831                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4832         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4833                                  MAC_STATUS_SIGNAL_DET)) {
4834                 sg_dig_status = tr32(SG_DIG_STATUS);
4835                 mac_status = tr32(MAC_STATUS);
4836
4837                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4838                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4839                         u32 local_adv = 0, remote_adv = 0;
4840
4841                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4842                                 local_adv |= ADVERTISE_1000XPAUSE;
4843                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4844                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4845
4846                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4847                                 remote_adv |= LPA_1000XPAUSE;
4848                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4849                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4850
4851                         tp->link_config.rmt_adv =
4852                                            mii_adv_to_ethtool_adv_x(remote_adv);
4853
4854                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4855                         current_link_up = 1;
4856                         tp->serdes_counter = 0;
4857                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4858                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4859                         if (tp->serdes_counter)
4860                                 tp->serdes_counter--;
4861                         else {
4862                                 if (workaround) {
4863                                         u32 val = serdes_cfg;
4864
4865                                         if (port_a)
4866                                                 val |= 0xc010000;
4867                                         else
4868                                                 val |= 0x4010000;
4869
4870                                         tw32_f(MAC_SERDES_CFG, val);
4871                                 }
4872
4873                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4874                                 udelay(40);
4875
4876                                 /* Link parallel detection - link is up */
4877                                 /* only if we have PCS_SYNC and not */
4878                                 /* receiving config code words */
4879                                 mac_status = tr32(MAC_STATUS);
4880                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4881                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4882                                         tg3_setup_flow_control(tp, 0, 0);
4883                                         current_link_up = 1;
4884                                         tp->phy_flags |=
4885                                                 TG3_PHYFLG_PARALLEL_DETECT;
4886                                         tp->serdes_counter =
4887                                                 SERDES_PARALLEL_DET_TIMEOUT;
4888                                 } else
4889                                         goto restart_autoneg;
4890                         }
4891                 }
4892         } else {
4893                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4894                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4895         }
4896
4897 out:
4898         return current_link_up;
4899 }
4900
/* Link setup for fiber without hardware autoneg support: either run
 * 802.3z autonegotiation in software (via fiber_autoneg()) or, when
 * autoneg is disabled, force a 1000FD link.  @mac_status is the
 * caller's snapshot of MAC_STATUS.  Returns nonzero if link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no usable signal - leave link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated config words into
			 * MII-style 1000BASE-X pause bits so the common
			 * flow control resolution code can be used.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-changed events until they stay clear,
		 * bounded at 30 attempts.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection fallback: PCS is synced but no
		 * config code words are being received, so treat the
		 * link as up even though autoneg did not complete.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config words, then return MAC_MODE to its
		 * normal value.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4965
/* (Re)configure a TBI/fiber link.  Puts the MAC in TBI port mode,
 * runs either hardware or by-hand autoneg, acknowledges the resulting
 * status-change events, then updates cached link state, LEDs and the
 * carrier/link report.  Always returns 0.
 *
 * NOTE(review): @force_reset is not referenced in this function; it
 * appears to exist only for signature parity with the other setup
 * routines - confirm.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so only real changes are reported. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already established and
	 * stable (synced, signal present, no pending config words) -
	 * just ack the change bits and leave everything alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block while
	 * marking it updated.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change events until they remain clear (up to 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * try to restart the partner's negotiation.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the link LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report on carrier transitions, or on flow-control/speed/duplex
	 * changes even when the carrier state itself did not change.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5074
/* Link setup for fiber reached through an MII-register SerDes PHY
 * (GMII port mode).  Handles three cases: waiting out a previous
 * parallel-detect, (re)starting autonegotiation, and forcing the
 * duplex when autoneg is disabled.  Returns the OR of all PHY access
 * error codes (err accumulates via |=).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any stale MAC status events before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to get the
	 * current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 the MAC TX status is the authoritative link indicator;
	 * override what the PHY reported.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and advertising masks.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* If the advertisement changed or autoneg is off in the
		 * PHY, (re)start autoneg and return early - the result
		 * is picked up on a later poll (serdes_counter timeout).
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg and set the requested
		 * duplex in BMCR.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latched) link status after the
			 * forced-mode write, with the 5714 override again.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's ability.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this tests the *previous* active_duplex;
	 * current_duplex is only copied into link_config further down.
	 * Confirm this ordering is intended.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5246
/* Periodic SerDes parallel-detection handling (called from outside
 * this chunk - presumably the driver's link timer, confirm).  While
 * serdes_counter is nonzero, autoneg is still being given time.
 * After that: if the link is down with autoneg enabled, check the
 * PHY's shadow/expansion registers for "signal present but no config
 * words" and force the link up by parallel detection; if the link is
 * up via a previous parallel detect and config words start arriving,
 * fall back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice - first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000FD and disable autoneg. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5306
5307 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5308 {
5309         u32 val;
5310         int err;
5311
5312         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5313                 err = tg3_setup_fiber_phy(tp, force_reset);
5314         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5315                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5316         else
5317                 err = tg3_setup_copper_phy(tp, force_reset);
5318
5319         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5320                 u32 scale;
5321
5322                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5323                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5324                         scale = 65;
5325                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5326                         scale = 6;
5327                 else
5328                         scale = 12;
5329
5330                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5331                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5332                 tw32(GRC_MISC_CFG, val);
5333         }
5334
5335         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5336               (6 << TX_LENGTHS_IPG_SHIFT);
5337         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5338                 val |= tr32(MAC_TX_LENGTHS) &
5339                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5340                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5341
5342         if (tp->link_config.active_speed == SPEED_1000 &&
5343             tp->link_config.active_duplex == DUPLEX_HALF)
5344                 tw32(MAC_TX_LENGTHS, val |
5345                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5346         else
5347                 tw32(MAC_TX_LENGTHS, val |
5348                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5349
5350         if (!tg3_flag(tp, 5705_PLUS)) {
5351                 if (netif_carrier_ok(tp->dev)) {
5352                         tw32(HOSTCC_STAT_COAL_TICKS,
5353                              tp->coal.stats_block_coalesce_usecs);
5354                 } else {
5355                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5356                 }
5357         }
5358
5359         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5360                 val = tr32(PCIE_PWR_MGMT_THRESH);
5361                 if (!netif_carrier_ok(tp->dev))
5362                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5363                               tp->pwrmgmt_thresh;
5364                 else
5365                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5366                 tw32(PCIE_PWR_MGMT_THRESH, val);
5367         }
5368
5369         return err;
5370 }
5371
/* Return the irq_sync flag; nonzero while interrupt processing is
 * being quiesced by code elsewhere in the driver (set/cleared outside
 * this chunk).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5376
5377 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5378 {
5379         int i;
5380
5381         dst = (u32 *)((u8 *)dst + off);
5382         for (i = 0; i < len; i += sizeof(u32))
5383                 *dst++ = tr32(off + i);
5384 }
5385
/* Dump the legacy (non-PCIe) register blocks into @regs for debug
 * output.  Each tg3_rd32_loop() call copies the given number of bytes
 * starting at a register-block base offset; since tg3_rd32_loop()
 * offsets the destination by the same value, the buffer mirrors the
 * device register layout.  Chip-variant-only blocks are gated on the
 * corresponding feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector host coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Pre-5705 chips have a separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5435
/* Log a debug snapshot of the device: a register dump (skipping
 * all-zero rows) followed by the hardware status block and NAPI
 * bookkeeping for every interrupt vector.  Allocates with GFP_ATOMIC
 * - presumably because it can run in non-sleepable (error/timeout)
 * context; confirm against callers.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line; suppress all-zero lines
	 * (unread gaps are zero thanks to kzalloc).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI ring state for the same vector. */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5493
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense if the reorder workaround is not
	 * already active and mailbox writes are not going through the
	 * indirect path.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under the lock; the actual chip
	 * reset happens elsewhere (the workqueue, per the comment above).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5515
5516 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5517 {
5518         /* Tell compiler to fetch tx indices from memory. */
5519         barrier();
5520         return tnapi->tx_pending -
5521                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5522 }
5523
5524 /* Tigon3 never reports partial packet sends.  So we do not
5525  * need special logic to handle SKBs that have not had all
5526  * of their frags sent yet, like SunGEM does.
5527  */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's tx consumer index, as reported in the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* NOTE(review): with TSS the tx rings appear to start at napi[1],
	 * hence the index is shifted back by one to get the netdev tx
	 * queue number — confirm against tg3 napi setup.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reclaim every descriptor the hardware has finished with. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means software and hardware
		 * state disagree; schedule a full tx recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the packet. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra descriptors used when the head mapping had to
		 * be split across several BDs.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Running past hw_idx or hitting an occupied slot
			 * mid-packet indicates ring corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			/* Again skip split-mapping continuation slots. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte queue limits machinery. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough space has freed up.
	 * Re-check under the tx lock to avoid racing with the xmit path.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5617
5618 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5619 {
5620         if (!ri->data)
5621                 return;
5622
5623         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5624                          map_sz, PCI_DMA_FROMDEVICE);
5625         kfree(ri->data);
5626         ri->data = NULL;
5627 }
5628
5629 /* Returns size of skb allocated or < 0 on error.
5630  *
5631  * We only need to fill in the address because the other members
5632  * of the RX descriptor are invariant, see tg3_init_rings.
5633  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5635  * posting buffers we only dirty the first cache line of the RX
5636  * descriptor (containing the address).  Whereas for the RX status
5637  * buffers the cpu only reads the last cacheline of the RX descriptor
5638  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5639  */
5640 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5641                             u32 opaque_key, u32 dest_idx_unmasked)
5642 {
5643         struct tg3_rx_buffer_desc *desc;
5644         struct ring_info *map;
5645         u8 *data;
5646         dma_addr_t mapping;
5647         int skb_size, data_size, dest_idx;
5648
5649         switch (opaque_key) {
5650         case RXD_OPAQUE_RING_STD:
5651                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5652                 desc = &tpr->rx_std[dest_idx];
5653                 map = &tpr->rx_std_buffers[dest_idx];
5654                 data_size = tp->rx_pkt_map_sz;
5655                 break;
5656
5657         case RXD_OPAQUE_RING_JUMBO:
5658                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5659                 desc = &tpr->rx_jmb[dest_idx].std;
5660                 map = &tpr->rx_jmb_buffers[dest_idx];
5661                 data_size = TG3_RX_JMB_MAP_SZ;
5662                 break;
5663
5664         default:
5665                 return -EINVAL;
5666         }
5667
5668         /* Do not overwrite any of the map or rp information
5669          * until we are sure we can commit to a new buffer.
5670          *
5671          * Callers depend upon this behavior and assume that
5672          * we leave everything unchanged if we fail.
5673          */
5674         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5675                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5676         data = kmalloc(skb_size, GFP_ATOMIC);
5677         if (!data)
5678                 return -ENOMEM;
5679
5680         mapping = pci_map_single(tp->pdev,
5681                                  data + TG3_RX_OFFSET(tp),
5682                                  data_size,
5683                                  PCI_DMA_FROMDEVICE);
5684         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5685                 kfree(data);
5686                 return -EIO;
5687         }
5688
5689         map->data = data;
5690         dma_unmap_addr_set(map, mapping, mapping);
5691
5692         desc->addr_hi = ((u64)mapping >> 32);
5693         desc->addr_lo = ((u64)mapping & 0xffffffff);
5694
5695         return data_size;
5696 }
5697
5698 /* We only need to move over in the address because the other
5699  * members of the RX descriptor are invariant.  See notes above
5700  * tg3_alloc_rx_data for full details.
5701  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always napi[0]'s producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Pick source/destination descriptor and ring_info slots in the
	 * matching ring (standard or jumbo); unknown keys are ignored.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move buffer ownership: copy the data pointer, the DMA unmap
	 * cookie, and the bus address words into the destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* Clearing the source slot last marks it safe for reuse. */
	src_map->data = NULL;
}
5747
5748 /* The RX ring scheme is composed of multiple rings which post fresh
5749  * buffers to the chip, and one special ring the chip uses to report
5750  * status back to the host.
5751  *
5752  * The special ring reports the status of received packets to the
5753  * host.  The chip does not write into the original descriptor the
5754  * RX buffer was obtained from.  The chip simply takes the original
5755  * descriptor as provided by the host, updates the status and length
5756  * field, then writes this into the next status ring entry.
5757  *
5758  * Each ring the host uses to post buffers to the chip is described
5759  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5760  * it is first placed into the on-chip ram.  When the packet's length
5761  * is known, it walks down the TG3_BDINFO entries to select the ring.
5762  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5763  * which is within the range of the new packet's length is chosen.
5764  *
5765  * The "separate ring for rx status" scheme may sound queer, but it makes
5766  * sense from a cache coherency perspective.  If only the host writes
5767  * to the buffer post rings, and only the chip writes to the rx status
5768  * rings, then cache lines never move beyond shared-modified state.
5769  * If both the host and chip were to write into the same ring, cache line
5770  * eviction could occur since both entities want it in an exclusive state.
5771  */
5772 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5773 {
5774         struct tg3 *tp = tnapi->tp;
5775         u32 work_mask, rx_std_posted = 0;
5776         u32 std_prod_idx, jmb_prod_idx;
5777         u32 sw_idx = tnapi->rx_rcb_ptr;
5778         u16 hw_idx;
5779         int received;
5780         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5781
5782         hw_idx = *(tnapi->rx_rcb_prod_idx);
5783         /*
5784          * We need to order the read of hw_idx and the read of
5785          * the opaque cookie.
5786          */
5787         rmb();
5788         work_mask = 0;
5789         received = 0;
5790         std_prod_idx = tpr->rx_std_prod_idx;
5791         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5792         while (sw_idx != hw_idx && budget > 0) {
5793                 struct ring_info *ri;
5794                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5795                 unsigned int len;
5796                 struct sk_buff *skb;
5797                 dma_addr_t dma_addr;
5798                 u32 opaque_key, desc_idx, *post_ptr;
5799                 u8 *data;
5800
5801                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5802                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5803                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5804                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5805                         dma_addr = dma_unmap_addr(ri, mapping);
5806                         data = ri->data;
5807                         post_ptr = &std_prod_idx;
5808                         rx_std_posted++;
5809                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5810                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5811                         dma_addr = dma_unmap_addr(ri, mapping);
5812                         data = ri->data;
5813                         post_ptr = &jmb_prod_idx;
5814                 } else
5815                         goto next_pkt_nopost;
5816
5817                 work_mask |= opaque_key;
5818
5819                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5820                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5821                 drop_it:
5822                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5823                                        desc_idx, *post_ptr);
5824                 drop_it_no_recycle:
5825                         /* Other statistics kept track of by card. */
5826                         tp->rx_dropped++;
5827                         goto next_pkt;
5828                 }
5829
5830                 prefetch(data + TG3_RX_OFFSET(tp));
5831                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5832                       ETH_FCS_LEN;
5833
5834                 if (len > TG3_RX_COPY_THRESH(tp)) {
5835                         int skb_size;
5836
5837                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5838                                                     *post_ptr);
5839                         if (skb_size < 0)
5840                                 goto drop_it;
5841
5842                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5843                                          PCI_DMA_FROMDEVICE);
5844
5845                         skb = build_skb(data);
5846                         if (!skb) {
5847                                 kfree(data);
5848                                 goto drop_it_no_recycle;
5849                         }
5850                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5851                         /* Ensure that the update to the data happens
5852                          * after the usage of