tg3: Workaround tagged status update bug
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
/* Set @flag in the @bits flag bitmap (used via the tg3_flag_set() macro). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Clear @flag in the @bits flag bitmap (used via the tg3_flag_clear() macro). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 /* length of time before we decide the hardware is borked,
111  * and dev->tx_timeout() should be called to fix the problem
112  */
113
114 #define TG3_TX_TIMEOUT                  (5 * HZ)
115
116 /* hardware minimum and maximum for a single frame's data payload */
117 #define TG3_MIN_MTU                     60
118 #define TG3_MAX_MTU(tp) \
119         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
120
121 /* These numbers seem to be hard coded in the NIC firmware somehow.
122  * You can't change the ring sizes, but you can change where you place
123  * them in the NIC onboard memory.
124  */
125 #define TG3_RX_STD_RING_SIZE(tp) \
126         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
127          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
128 #define TG3_DEF_RX_RING_PENDING         200
129 #define TG3_RX_JMB_RING_SIZE(tp) \
130         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
132 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
133 #define TG3_RSS_INDIR_TBL_SIZE          128
134
135 /* Do not place this n-ring entries value into the tp struct itself,
136  * we really want to expose these constants to GCC so that modulo et
137  * al.  operations are done with shifts and masks instead of with
138  * hw multiply/modulo instructions.  Another solution would be to
139  * replace things like '% foo' with '& (foo - 1)'.
140  */
141
142 #define TG3_TX_RING_SIZE                512
143 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
144
145 #define TG3_RX_STD_RING_BYTES(tp) \
146         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
147 #define TG3_RX_JMB_RING_BYTES(tp) \
148         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
149 #define TG3_RX_RCB_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
151 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
152                                  TG3_TX_RING_SIZE)
153 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154
155 #define TG3_DMA_BYTE_ENAB               64
156
157 #define TG3_RX_STD_DMA_SZ               1536
158 #define TG3_RX_JMB_DMA_SZ               9046
159
160 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
161
162 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
163 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164
165 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
166         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167
168 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
169         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
170
171 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
172  * that are at least dword aligned when used in PCIX mode.  The driver
173  * works around this bug by double copying the packet.  This workaround
174  * is built into the normal double copy length check for efficiency.
175  *
176  * However, the double copy is only necessary on those architectures
177  * where unaligned memory accesses are inefficient.  For those architectures
178  * where unaligned memory accesses incur little penalty, we can reintegrate
179  * the 5701 in the normal rx path.  Doing so saves a device structure
180  * dereference by hardcoding the double copy threshold in place.
181  */
182 #define TG3_RX_COPY_THRESHOLD           256
183 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
184         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
185 #else
186         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
187 #endif
188
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
191
192 #define TG3_RAW_IP_ALIGN 2
193
194 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
195
196 #define FIRMWARE_TG3            "tigon/tg3.bin"
197 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
198 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
199
200 static char version[] __devinitdata =
201         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
202
203 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
204 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
205 MODULE_LICENSE("GPL");
206 MODULE_VERSION(DRV_MODULE_VERSION);
207 MODULE_FIRMWARE(FIRMWARE_TG3);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
209 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
210
211 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
212 module_param(tg3_debug, int, 0);
213 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
214
215 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
289         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
290         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
291         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
292         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
295         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
296         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
297         {}
298 };
299
300 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_stats_keys[] = {
305         { "rx_octets" },
306         { "rx_fragments" },
307         { "rx_ucast_packets" },
308         { "rx_mcast_packets" },
309         { "rx_bcast_packets" },
310         { "rx_fcs_errors" },
311         { "rx_align_errors" },
312         { "rx_xon_pause_rcvd" },
313         { "rx_xoff_pause_rcvd" },
314         { "rx_mac_ctrl_rcvd" },
315         { "rx_xoff_entered" },
316         { "rx_frame_too_long_errors" },
317         { "rx_jabbers" },
318         { "rx_undersize_packets" },
319         { "rx_in_length_errors" },
320         { "rx_out_length_errors" },
321         { "rx_64_or_less_octet_packets" },
322         { "rx_65_to_127_octet_packets" },
323         { "rx_128_to_255_octet_packets" },
324         { "rx_256_to_511_octet_packets" },
325         { "rx_512_to_1023_octet_packets" },
326         { "rx_1024_to_1522_octet_packets" },
327         { "rx_1523_to_2047_octet_packets" },
328         { "rx_2048_to_4095_octet_packets" },
329         { "rx_4096_to_8191_octet_packets" },
330         { "rx_8192_to_9022_octet_packets" },
331
332         { "tx_octets" },
333         { "tx_collisions" },
334
335         { "tx_xon_sent" },
336         { "tx_xoff_sent" },
337         { "tx_flow_control" },
338         { "tx_mac_errors" },
339         { "tx_single_collisions" },
340         { "tx_mult_collisions" },
341         { "tx_deferred" },
342         { "tx_excessive_collisions" },
343         { "tx_late_collisions" },
344         { "tx_collide_2times" },
345         { "tx_collide_3times" },
346         { "tx_collide_4times" },
347         { "tx_collide_5times" },
348         { "tx_collide_6times" },
349         { "tx_collide_7times" },
350         { "tx_collide_8times" },
351         { "tx_collide_9times" },
352         { "tx_collide_10times" },
353         { "tx_collide_11times" },
354         { "tx_collide_12times" },
355         { "tx_collide_13times" },
356         { "tx_collide_14times" },
357         { "tx_collide_15times" },
358         { "tx_ucast_packets" },
359         { "tx_mcast_packets" },
360         { "tx_bcast_packets" },
361         { "tx_carrier_sense_errors" },
362         { "tx_discards" },
363         { "tx_errors" },
364
365         { "dma_writeq_full" },
366         { "dma_write_prioq_full" },
367         { "rxbds_empty" },
368         { "rx_discards" },
369         { "rx_errors" },
370         { "rx_threshold_hit" },
371
372         { "dma_readq_full" },
373         { "dma_read_prioq_full" },
374         { "tx_comp_queue_full" },
375
376         { "ring_set_send_prod_index" },
377         { "ring_status_update" },
378         { "nic_irqs" },
379         { "nic_avoided_irqs" },
380         { "nic_tx_threshold_hit" },
381
382         { "mbuf_lwm_thresh_hit" },
383 };
384
385 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
386
387
388 static const struct {
389         const char string[ETH_GSTRING_LEN];
390 } ethtool_test_keys[] = {
391         { "nvram test     (online) " },
392         { "link test      (online) " },
393         { "register test  (offline)" },
394         { "memory test    (offline)" },
395         { "loopback test  (offline)" },
396         { "interrupt test (offline)" },
397 };
398
399 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
400
401
/* Write a 32-bit device register through the MMIO register window.
 * The write is posted; no read-back flush is performed here.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
406
/* Read a 32-bit device register through the MMIO register window. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
411
/* Write a 32-bit register in the APE register block (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
416
/* Read a 32-bit register from the APE register block (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
421
422 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
423 {
424         unsigned long flags;
425
426         spin_lock_irqsave(&tp->indirect_lock, flags);
427         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
428         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
429         spin_unlock_irqrestore(&tp->indirect_lock, flags);
430 }
431
/* Write a register, then read it back so the posted PCI write is
 * forced to complete before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
437
438 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
439 {
440         unsigned long flags;
441         u32 val;
442
443         spin_lock_irqsave(&tp->indirect_lock, flags);
444         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
445         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
446         spin_unlock_irqrestore(&tp->indirect_lock, flags);
447         return val;
448 }
449
/* Write a mailbox register when direct MMIO mailbox access is not used.
 *
 * Two mailboxes have dedicated PCI config-space shadow registers and
 * are written there directly (no indirect_lock needed, since they do
 * not touch the shared address/data window): the RX return ring 0
 * consumer index and the standard RX producer index.  Everything else
 * goes through the indirect register window at off + 0x5600
 * (the mailbox region offset -- matches tg3_read_indirect_mbox()).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
479
480 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
481 {
482         unsigned long flags;
483         u32 val;
484
485         spin_lock_irqsave(&tp->indirect_lock, flags);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
487         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
488         spin_unlock_irqrestore(&tp->indirect_lock, flags);
489         return val;
490 }
491
492 /* usec_wait specifies the wait time in usec when writing to certain registers
493  * where it is unsafe to read back the register without some delay.
494  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
495  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
496  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read-back flushes the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
515
/* Write a mailbox register and read it back to flush the posted write.
 * The read-back is skipped when either the mailbox write-reorder or
 * the ICH workaround flag is in effect.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
522
/* Write a TX mailbox via direct MMIO.  Chips with the TXD mailbox
 * hardware bug (TXD_MBOX_HWBUG) need the value written twice; when
 * writes may be reordered in flight (MBOX_WRITE_REORDER) a read-back
 * flush is added.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
532
/* 5906: mailbox reads go through the GRC mailbox register range. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
537
/* 5906: mailbox writes go through the GRC mailbox register range. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
542
543 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
544 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
545 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
546 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
547 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
548
549 #define tw32(reg, val)                  tp->write32(tp, reg, val)
550 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
551 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
552 #define tr32(reg)                       tp->read32(tp, reg)
553
/* Write a 32-bit word into NIC on-chip SRAM at @off through the memory
 * window, under indirect_lock.  On the 5906, writes into the
 * [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC) range are silently
 * dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window access via PCI config space: set base, write data. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access via MMIO, with flushed writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
578
/* Read a 32-bit word of NIC on-chip SRAM at @off into *@val through
 * the memory window, under indirect_lock.  On the 5906, reads from the
 * [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC) range return 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window access via PCI config space: set base, read data. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window access via MMIO, with flushed writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
605
606 static void tg3_ape_lock_init(struct tg3 *tp)
607 {
608         int i;
609         u32 regbase;
610
611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
612                 regbase = TG3_APE_LOCK_GRANT;
613         else
614                 regbase = TG3_APE_PER_LOCK_GRANT;
615
616         /* Make sure the driver hasn't any stale locks. */
617         for (i = 0; i < 8; i++)
618                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
619 }
620
621 static int tg3_ape_lock(struct tg3 *tp, int locknum)
622 {
623         int i, off;
624         int ret = 0;
625         u32 status, req, gnt;
626
627         if (!tg3_flag(tp, ENABLE_APE))
628                 return 0;
629
630         switch (locknum) {
631         case TG3_APE_LOCK_GRC:
632         case TG3_APE_LOCK_MEM:
633                 break;
634         default:
635                 return -EINVAL;
636         }
637
638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
639                 req = TG3_APE_LOCK_REQ;
640                 gnt = TG3_APE_LOCK_GRANT;
641         } else {
642                 req = TG3_APE_PER_LOCK_REQ;
643                 gnt = TG3_APE_PER_LOCK_GRANT;
644         }
645
646         off = 4 * locknum;
647
648         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
649
650         /* Wait for up to 1 millisecond to acquire lock. */
651         for (i = 0; i < 100; i++) {
652                 status = tg3_ape_read32(tp, gnt + off);
653                 if (status == APE_LOCK_GRANT_DRIVER)
654                         break;
655                 udelay(10);
656         }
657
658         if (status != APE_LOCK_GRANT_DRIVER) {
659                 /* Revoke the lock request. */
660                 tg3_ape_write32(tp, gnt + off,
661                                 APE_LOCK_GRANT_DRIVER);
662
663                 ret = -EBUSY;
664         }
665
666         return ret;
667 }
668
669 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
670 {
671         u32 gnt;
672
673         if (!tg3_flag(tp, ENABLE_APE))
674                 return;
675
676         switch (locknum) {
677         case TG3_APE_LOCK_GRC:
678         case TG3_APE_LOCK_MEM:
679                 break;
680         default:
681                 return;
682         }
683
684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
685                 gnt = TG3_APE_LOCK_GRANT;
686         else
687                 gnt = TG3_APE_PER_LOCK_GRANT;
688
689         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
690 }
691
692 static void tg3_disable_ints(struct tg3 *tp)
693 {
694         int i;
695
696         tw32(TG3PCI_MISC_HOST_CTRL,
697              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
698         for (i = 0; i < tp->irq_max; i++)
699                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
700 }
701
/* Re-enable chip interrupts on all vectors and kick the coalescing
 * engine so pending work is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Clear irq_sync before unmasking; the barrier orders this store
	 * against the register writes below.
	 */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Ack up to last_tag; this also unmasks the vector. */
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): second identical write appears to be
		 * required in one-shot MSI mode -- confirm against the
		 * chip errata/programming docs.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' "now" bits from the cached value. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
732
733 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
734 {
735         struct tg3 *tp = tnapi->tp;
736         struct tg3_hw_status *sblk = tnapi->hw_status;
737         unsigned int work_exists = 0;
738
739         /* check for phy events */
740         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
741                 if (sblk->status & SD_STATUS_LINK_CHG)
742                         work_exists = 1;
743         }
744         /* check for RX/TX work to do */
745         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
746             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
747                 work_exists = 1;
748
749         return work_exists;
750 }
751
752 /* tg3_int_reenable
753  *  similar to tg3_enable_ints, but it accurately determines whether there
754  *  is new work pending and can return without flushing the PIO write
755  *  which reenables interrupts
756  */
757 static void tg3_int_reenable(struct tg3_napi *tnapi)
758 {
759         struct tg3 *tp = tnapi->tp;
760
761         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
762         mmiowb();
763
764         /* When doing tagged status, this work check is unnecessary.
765          * The last_tag we write above tells the chip which piece of
766          * work we've completed.
767          */
768         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
769                 tw32(HOSTCC_MODE, tp->coalesce_mode |
770                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
771 }
772
/* Drop the core clock back to its base rate by rewriting
 * TG3PCI_CLOCK_CTRL, caching the resulting value in
 * tp->pci_clock_ctrl.  Each write waits 40 usec (see _tw32_flush()).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	/* CPMU-equipped and 5780-class chips are not touched here. */
	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only CLKRUN control bits and the low 5 bits. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down through ALTCLK before the final value --
		 * NOTE(review): presumably a required intermediate state
		 * when leaving 44MHz core clocking; confirm with chip docs.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
805
/* Max number of MI_COM busy polls (10 usec apart) before an MDIO
 * transaction is abandoned with -EBUSY.
 */
#define PHY_BUSY_LOOPS  5000
807
/* Read PHY register @reg through the MAC's MI (MDIO) interface.
 *
 * If MI auto-polling is enabled it is paused for the duration of the
 * manual access and restored afterwards.  Completion is detected by
 * polling MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 with the register contents in @val, or -EBUSY if the
 * transaction never completed.  @val is zeroed on entry.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause auto-polling while we drive MI_COM by hand. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy just cleared; settle briefly, then latch
			 * the final frame value holding the data.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
856
/* Write @val to PHY register @reg through the MAC's MI (MDIO) interface.
 *
 * On FET-style PHYs, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently discarded and reported as success.  MI auto-polling, if
 * enabled, is paused for the duration of the access.
 *
 * Returns 0 on success or -EBUSY if the transaction never completed.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause auto-polling while we drive MI_COM by hand. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY address, register, data, write op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy just cleared; settle and re-read. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
905
906 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
907 {
908         int err;
909
910         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
911         if (err)
912                 goto done;
913
914         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
915         if (err)
916                 goto done;
917
918         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
919                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
920         if (err)
921                 goto done;
922
923         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
924
925 done:
926         return err;
927 }
928
929 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
930 {
931         int err;
932
933         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
934         if (err)
935                 goto done;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
942                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
943         if (err)
944                 goto done;
945
946         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
947
948 done:
949         return err;
950 }
951
952 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
953 {
954         int err;
955
956         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
957         if (!err)
958                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
959
960         return err;
961 }
962
963 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
964 {
965         int err;
966
967         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
968         if (!err)
969                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
970
971         return err;
972 }
973
974 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
975 {
976         int err;
977
978         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
979                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
980                            MII_TG3_AUXCTL_SHDWSEL_MISC);
981         if (!err)
982                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
983
984         return err;
985 }
986
987 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
988 {
989         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
990                 set |= MII_TG3_AUXCTL_MISC_WREN;
991
992         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
993 }
994
/* Enable SMDSP access (plus the TX 6dB coding setting) through the
 * auxiliary-control shadow register.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
999
/* Disable SMDSP access, keeping the TX 6dB coding setting.
 *
 * Note: no trailing semicolon -- the previous definition ended in ';',
 * which made every use expand to a double statement and broke the
 * macro in expression position and in brace-less if/else bodies.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1003
/* Reset the PHY by setting BMCR_RESET and waiting for the bit to
 * self-clear, polling MII_BMCR up to 5000 times at 10 usec intervals.
 *
 * Returns 0 on success, -EBUSY if an MDIO access fails or the reset
 * bit never clears.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			/* Reset completed; give the PHY a moment to settle. */
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only when the loop ran to exhaustion without a break. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1034
1035 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1036 {
1037         struct tg3 *tp = bp->priv;
1038         u32 val;
1039
1040         spin_lock_bh(&tp->lock);
1041
1042         if (tg3_readphy(tp, reg, &val))
1043                 val = -EIO;
1044
1045         spin_unlock_bh(&tp->lock);
1046
1047         return val;
1048 }
1049
1050 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1051 {
1052         struct tg3 *tp = bp->priv;
1053         u32 ret = 0;
1054
1055         spin_lock_bh(&tp->lock);
1056
1057         if (tg3_writephy(tp, reg, val))
1058                 ret = -EIO;
1059
1060         spin_unlock_bh(&tp->lock);
1061
1062         return ret;
1063 }
1064
/* mii_bus reset callback: nothing to do for tg3; always succeeds. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1069
/* Program the 5785 MAC-side PHY glue registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY's LED modes and the
 * RGMII in-band / out-of-band status configuration from tp flags.
 *
 * Silently returns for PHY models it does not recognize.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set the LED modes and the RX/TX clock
		 * timeouts only, then we are done.
		 */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled: add the mask and
	 * in-band-enable bits to the LED mode value.
	 */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band RX/TX selections into the
	 * extended RGMII mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1150
/* Prepare for manual MDIO access: turn off MI auto-polling, then, if
 * the MDIO bus is already registered on a 5785, refresh its MAC/PHY
 * glue registers.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1161
/* Set up MDIO access to the PHY.
 *
 * Determines the PHY address (5717-plus parts derive it from the PCI
 * function number, offset by 7 for serdes), restarts manual MDIO
 * access, and -- when phylib is in use and not yet initialized --
 * allocates and registers an mii_bus, resetting the PHY first if it
 * appears powered down.  Recognized PHY models then get their phylib
 * interface mode and dev_flags configured.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* PHY address tracks the PCI function on these parts. */
		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-model phylib configuration. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1266
1267 static void tg3_mdio_fini(struct tg3 *tp)
1268 {
1269         if (tg3_flag(tp, MDIOBUS_INITED)) {
1270                 tg3_flag_clear(tp, MDIOBUS_INITED);
1271                 mdiobus_unregister(tp->mdio_bus);
1272                 mdiobus_free(tp->mdio_bus);
1273         }
1274 }
1275
/* tp->lock is held.
 *
 * Signal the firmware that a new driver event is pending by setting
 * GRC_RX_CPU_DRIVER_EVENT, and record the time so that
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1287
/* Max time (usec) to wait for firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1289
/* tp->lock is held.
 *
 * Wait for firmware to consume the previously generated driver event:
 * poll until GRC_RX_CPU_DRIVER_EVENT clears, bounded by the time
 * remaining in the TG3_FW_EVENT_TIMEOUT_USEC window that started at
 * tp->last_event_jiffies.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps; +1 guarantees at least one iteration. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1316
/* tp->lock is held.
 *
 * Copy the current MII link state (BMCR/BMSR, advertisement/LPA,
 * 1000T control/status, PHY address) into the NIC SRAM firmware
 * mailbox and raise a LINK_UPDATE event for the management firmware.
 * Only applies to 5780-class devices with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Make sure the previous event was consumed before reusing
	 * the mailbox.
	 */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* 14 bytes of link data follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper PHYs only). */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register, if readable. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1363
/* Log the current link state (up/down, speed, duplex, flow control,
 * EEE) through netif_info/netdev_info and forward it to the firmware
 * via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1391
1392 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1393 {
1394         u16 miireg;
1395
1396         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1397                 miireg = ADVERTISE_PAUSE_CAP;
1398         else if (flow_ctrl & FLOW_CTRL_TX)
1399                 miireg = ADVERTISE_PAUSE_ASYM;
1400         else if (flow_ctrl & FLOW_CTRL_RX)
1401                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1402         else
1403                 miireg = 0;
1404
1405         return miireg;
1406 }
1407
1408 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1409 {
1410         u16 miireg;
1411
1412         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1413                 miireg = ADVERTISE_1000XPAUSE;
1414         else if (flow_ctrl & FLOW_CTRL_TX)
1415                 miireg = ADVERTISE_1000XPSE_ASYM;
1416         else if (flow_ctrl & FLOW_CTRL_RX)
1417                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1418         else
1419                 miireg = 0;
1420
1421         return miireg;
1422 }
1423
1424 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1425 {
1426         u8 cap = 0;
1427
1428         if (lcladv & ADVERTISE_1000XPAUSE) {
1429                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1430                         if (rmtadv & LPA_1000XPAUSE)
1431                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1432                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1433                                 cap = FLOW_CTRL_RX;
1434                 } else {
1435                         if (rmtadv & LPA_1000XPAUSE)
1436                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1437                 }
1438         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1439                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1440                         cap = FLOW_CTRL_TX;
1441         }
1442
1443         return cap;
1444 }
1445
/* Resolve and apply the TX/RX pause configuration.
 *
 * With autonegotiation (and PAUSE_AUTONEG) active, the result is
 * resolved from the local/remote advertisements -- 1000X rules for
 * serdes PHYs, standard MII full-duplex rules otherwise.  Without it,
 * the configured flowctrl is used verbatim.  The RX/TX mode registers
 * are only written when their enable bit actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1484
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init).  Propagates the PHY's current speed/duplex/pause
 * state into MAC_MODE, MAC_MI_STAT and MAC_TX_LENGTHS, and reports
 * link-state transitions through tg3_link_report().
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Port mode: MII for 10/100; GMII for gigabit, and also
		 * for unknown speeds on non-5785 chips.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config and
			 * the partner's reported pause capabilities.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit uses a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report when link, speed, duplex or flow control changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* tg3_link_report() takes the MDIO path; call it unlocked. */
	if (linkmesg)
		tg3_link_report(tp);
}
1568
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches it with tg3_adjust_link()
 * as the link-change handler, and masks the PHY's supported feature
 * set down to what the MAC can do (basic features only for MII-mode
 * or 10/100-only devices).  Returns 0 immediately if a PHY is already
 * connected.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1616
/* (Re)start the attached PHY and kick off autonegotiation.  If the
 * device was in the low-power state, the pre-suspend link settings
 * are restored first.  No-op when no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1638
1639 static void tg3_phy_stop(struct tg3 *tp)
1640 {
1641         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1642                 return;
1643
1644         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1645 }
1646
1647 static void tg3_phy_fini(struct tg3 *tp)
1648 {
1649         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1650                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1651                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1652         }
1653 }
1654
/* Enable/disable auto power-down on FET-style PHYs by toggling the
 * APD bit in the shadow AUXSTAT2 register.  Shadow access is opened
 * by setting MII_TG3_FET_SHADOW_EN in the test register, and the
 * original test register value is restored afterwards.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Leave shadow mode, restoring the saved test value. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
1674
/* Enable or disable the PHY's auto power-down (APD) feature.  No-op on
 * pre-5705 chips and on 5717+ devices using an MII SerDes interface.
 * FET-style PHYs are handled by tg3_phy_fet_toggle_apd(); all others
 * are programmed through two MISC shadow register writes (SCR5 block,
 * then the APD block).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First shadow write: SCR5 selector with low-power/link-monitor
	 * controls.  DLLAPD is left out only when enabling APD on 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second shadow write: APD selector with an 84ms wake timer,
	 * plus the enable bit when requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1709
/* Enable or disable automatic MDI/MDI-X crossover detection.  No-op on
 * pre-5705 chips and on any SerDes interface.  FET-style PHYs expose the
 * control through a shadow register behind MII_TG3_FET_TEST; other PHYs
 * use the AUXCTL misc shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow bank, flip the MDIX bit, then
			 * restore the original test register value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write of the AUXCTL misc shadow; skipped
		 * entirely if the read fails.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
1750
1751 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1752 {
1753         int ret;
1754         u32 val;
1755
1756         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1757                 return;
1758
1759         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1760         if (!ret)
1761                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1762                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1763 }
1764
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value cached in tp->phy_otp.  Each bit-field of the OTP word is
 * extracted and written to its corresponding DSP tap/expansion register.
 * No-op when no OTP value is cached or the SMDSP clock cannot be
 * enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Nonzero means the SMDSP enable failed; skip programming. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter trim/override */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude trim */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offset trims */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
1801
/* Re-evaluate the Energy Efficient Ethernet LPI state after a link
 * change.  When autoneg produced a full-duplex 100/1000 link and the
 * link partner resolved EEE, arm tp->setlpicnt so that LPI entry is
 * deferred a couple of timer ticks; otherwise disable LPI immediately.
 *
 * @current_link_up: 1 if the link is currently up, 0 otherwise.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timing depends on the negotiated speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the EEE resolution status via clause-45 access. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	/* No EEE resolution: make sure LPI is off. */
	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1838
1839 static void tg3_phy_eee_enable(struct tg3 *tp)
1840 {
1841         u32 val;
1842
1843         if (tp->link_config.active_speed == SPEED_1000 &&
1844             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1847             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1848                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1849                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1850         }
1851
1852         val = tr32(TG3_CPMU_EEE_MODE);
1853         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1854 }
1855
1856 static int tg3_wait_macro_done(struct tg3 *tp)
1857 {
1858         int limit = 100;
1859
1860         while (limit--) {
1861                 u32 tmp32;
1862
1863                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1864                         if ((tmp32 & 0x1000) == 0)
1865                                 break;
1866                 }
1867         }
1868         if (limit < 0)
1869                 return -EBUSY;
1870
1871         return 0;
1872 }
1873
/* Write a known test pattern into each of the four DSP channel memories
 * and read it back to verify the PHY is stable.  On a macro-done
 * timeout, *resetp is set so the caller re-resets the PHY before
 * retrying.  On a data miscompare, recovery values are written to the
 * DSP and -EBUSY is returned without requesting another reset.
 * Returns 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel 6-word patterns: (low, high) pairs for 3 taps. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Point at this channel's block and open it for writing. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back each (low, high) word pair and compare. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Miscompare: write DSP recovery values. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1939
1940 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1941 {
1942         int chan;
1943
1944         for (chan = 0; chan < 4; chan++) {
1945                 int i;
1946
1947                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948                              (chan * 0x2000) | 0x0200);
1949                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950                 for (i = 0; i < 6; i++)
1951                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1952                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953                 if (tg3_wait_macro_done(tp))
1954                         return -EBUSY;
1955         }
1956
1957         return 0;
1958 }
1959
1960 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1961 {
1962         u32 reg32, phy9_orig;
1963         int retries, do_phy_reset, err;
1964
1965         retries = 10;
1966         do_phy_reset = 1;
1967         do {
1968                 if (do_phy_reset) {
1969                         err = tg3_bmcr_reset(tp);
1970                         if (err)
1971                                 return err;
1972                         do_phy_reset = 0;
1973                 }
1974
1975                 /* Disable transmitter and interrupt.  */
1976                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1977                         continue;
1978
1979                 reg32 |= 0x3000;
1980                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1981
1982                 /* Set full-duplex, 1000 mbps.  */
1983                 tg3_writephy(tp, MII_BMCR,
1984                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1985
1986                 /* Set to master mode.  */
1987                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1988                         continue;
1989
1990                 tg3_writephy(tp, MII_TG3_CTRL,
1991                              (MII_TG3_CTRL_AS_MASTER |
1992                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1993
1994                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1995                 if (err)
1996                         return err;
1997
1998                 /* Block the PHY control access.  */
1999                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2000
2001                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2002                 if (!err)
2003                         break;
2004         } while (--retries);
2005
2006         err = tg3_phy_reset_chanpat(tp);
2007         if (err)
2008                 return err;
2009
2010         tg3_phydsp_write(tp, 0x8005, 0x0000);
2011
2012         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2013         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2014
2015         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2016
2017         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2018
2019         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2020                 reg32 &= ~0x3000;
2021                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2022         } else if (!err)
2023                 err = -EBUSY;
2024
2025         return err;
2026 }
2027
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * workarounds.  Returns 0 on success or a negative errno if the PHY
 * does not respond.  (An older version of this comment referred to a
 * FORCE argument; the function takes none.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: bring the internal ethernet PHY out of IDDQ power-down
	 * before touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR latches link-down; read twice to get current status and
	 * to confirm the PHY responds at all.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report link loss before the reset tears the link down. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the full DSP test-pattern workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily lift the GPHY 10MB-RX-only clock
	 * restriction around the BMCR reset.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	/* Restore the CPMU setting saved above, after a DSP EXP8 fixup. */
	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: take the 1000MB MAC clock out of 12.5MHz mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ MII SerDes: none of the copper workarounds below apply. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY erratum workarounds, applied via DSP writes under the
	 * SMDSP clock.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice intentionally. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2168
/* Drive the GRC local-control GPIOs that gate auxiliary (Vaux) power.
 * If WOL or ASF needs standby power -- on this function or on the peer
 * function of a dual-port device -- the appropriate GPIO sequence
 * asserts Vaux; otherwise the GPIOs are parked to release it.  No-op
 * when this is not a NIC configuration, or on 5719/57765 where the
 * GPIOs serve other purposes.  The exact write sequences are
 * board-specific and order-sensitive.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* Dual-port chips: consult the peer function's state too. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer manages the GPIOs
			 * itself; don't touch them here.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* No standby power needed: park GPIO1, pulsing OUTPUT1. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2285
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2287 {
2288         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2289                 return 1;
2290         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291                 if (speed != SPEED_10)
2292                         return 1;
2293         } else if (speed == SPEED_10)
2294                 return 1;
2295
2296         return 0;
2297 }
2298
2299 static int tg3_setup_phy(struct tg3 *, int);
2300
2301 #define RESET_KIND_SHUTDOWN     0
2302 #define RESET_KIND_INIT         1
2303 #define RESET_KIND_SUSPEND      2
2304
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2307
/* Power down the PHY (or SerDes block) as part of a low-power
 * transition.  Each chip family needs a different sequence, and on some
 * chips the final BMCR power-down must be skipped entirely due to
 * hardware bugs.
 *
 * @do_low_power: when true, also program LED-off and the aggressive
 * AUXCTL power-control settings before powering down.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 SerDes: hand the link to HW autoneg and hold the
		 * SerDes in soft reset instead of powering down.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Clear advertisement and restart autoneg, then
			 * enable standby power-down via the shadow bank.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force LEDs off and select low-power analog settings. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	/* 5784-AX/5761-AX: force the 1000MB MAC clock to 12.5MHz before
	 * powering down the PHY.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2380
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2383 {
2384         if (tg3_flag(tp, NVRAM)) {
2385                 int i;
2386
2387                 if (tp->nvram_lock_cnt == 0) {
2388                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389                         for (i = 0; i < 8000; i++) {
2390                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2391                                         break;
2392                                 udelay(20);
2393                         }
2394                         if (i == 8000) {
2395                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2396                                 return -ENODEV;
2397                         }
2398                 }
2399                 tp->nvram_lock_cnt++;
2400         }
2401         return 0;
2402 }
2403
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2406 {
2407         if (tg3_flag(tp, NVRAM)) {
2408                 if (tp->nvram_lock_cnt > 0)
2409                         tp->nvram_lock_cnt--;
2410                 if (tp->nvram_lock_cnt == 0)
2411                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2412         }
2413 }
2414
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2417 {
2418         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419                 u32 nvaccess = tr32(NVRAM_ACCESS);
2420
2421                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2422         }
2423 }
2424
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2427 {
2428         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429                 u32 nvaccess = tr32(NVRAM_ACCESS);
2430
2431                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2432         }
2433 }
2434
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436                                         u32 offset, u32 *val)
2437 {
2438         u32 tmp;
2439         int i;
2440
2441         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2442                 return -EINVAL;
2443
2444         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445                                         EEPROM_ADDR_DEVID_MASK |
2446                                         EEPROM_ADDR_READ);
2447         tw32(GRC_EEPROM_ADDR,
2448              tmp |
2449              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451               EEPROM_ADDR_ADDR_MASK) |
2452              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2453
2454         for (i = 0; i < 1000; i++) {
2455                 tmp = tr32(GRC_EEPROM_ADDR);
2456
2457                 if (tmp & EEPROM_ADDR_COMPLETE)
2458                         break;
2459                 msleep(1);
2460         }
2461         if (!(tmp & EEPROM_ADDR_COMPLETE))
2462                 return -EBUSY;
2463
2464         tmp = tr32(GRC_EEPROM_DATA);
2465
2466         /*
2467          * The data will always be opposite the native endian
2468          * format.  Perform a blind byteswap to compensate.
2469          */
2470         *val = swab32(tmp);
2471
2472         return 0;
2473 }
2474
2475 #define NVRAM_CMD_TIMEOUT 10000
2476
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2478 {
2479         int i;
2480
2481         tw32(NVRAM_CMD, nvram_cmd);
2482         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2483                 udelay(10);
2484                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2485                         udelay(10);
2486                         break;
2487                 }
2488         }
2489
2490         if (i == NVRAM_CMD_TIMEOUT)
2491                 return -EBUSY;
2492
2493         return 0;
2494 }
2495
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2497 {
2498         if (tg3_flag(tp, NVRAM) &&
2499             tg3_flag(tp, NVRAM_BUFFERED) &&
2500             tg3_flag(tp, FLASH) &&
2501             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502             (tp->nvram_jedecnum == JEDEC_ATMEL))
2503
2504                 addr = ((addr / tp->nvram_pagesize) <<
2505                         ATMEL_AT45DB0X1B_PAGE_POS) +
2506                        (addr % tp->nvram_pagesize);
2507
2508         return addr;
2509 }
2510
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2512 {
2513         if (tg3_flag(tp, NVRAM) &&
2514             tg3_flag(tp, NVRAM_BUFFERED) &&
2515             tg3_flag(tp, FLASH) &&
2516             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517             (tp->nvram_jedecnum == JEDEC_ATMEL))
2518
2519                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520                         tp->nvram_pagesize) +
2521                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2522
2523         return addr;
2524 }
2525
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527  * the byteswapping settings for all other register accesses.
2528  * tg3 devices are BE devices, so on a BE machine, the data
2529  * returned will be exactly as it is seen in NVRAM.  On a LE
2530  * machine, the 32-bit value will be byteswapped.
2531  */
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2533 {
2534         int ret;
2535
2536         if (!tg3_flag(tp, NVRAM))
2537                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2538
2539         offset = tg3_nvram_phys_addr(tp, offset);
2540
2541         if (offset > NVRAM_ADDR_MSK)
2542                 return -EINVAL;
2543
2544         ret = tg3_nvram_lock(tp);
2545         if (ret)
2546                 return ret;
2547
2548         tg3_enable_nvram_access(tp);
2549
2550         tw32(NVRAM_ADDR, offset);
2551         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553
2554         if (ret == 0)
2555                 *val = tr32(NVRAM_RDDATA);
2556
2557         tg3_disable_nvram_access(tp);
2558
2559         tg3_nvram_unlock(tp);
2560
2561         return ret;
2562 }
2563
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 {
2567         u32 v;
2568         int res = tg3_nvram_read(tp, offset, &v);
2569         if (!res)
2570                 *val = cpu_to_be32(v);
2571         return res;
2572 }
2573
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2576 {
2577         u32 addr_high, addr_low;
2578         int i;
2579
2580         addr_high = ((tp->dev->dev_addr[0] << 8) |
2581                      tp->dev->dev_addr[1]);
2582         addr_low = ((tp->dev->dev_addr[2] << 24) |
2583                     (tp->dev->dev_addr[3] << 16) |
2584                     (tp->dev->dev_addr[4] <<  8) |
2585                     (tp->dev->dev_addr[5] <<  0));
2586         for (i = 0; i < 4; i++) {
2587                 if (i == 1 && skip_mac_1)
2588                         continue;
2589                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2591         }
2592
2593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595                 for (i = 0; i < 12; i++) {
2596                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2598                 }
2599         }
2600
2601         addr_high = (tp->dev->dev_addr[0] +
2602                      tp->dev->dev_addr[1] +
2603                      tp->dev->dev_addr[2] +
2604                      tp->dev->dev_addr[3] +
2605                      tp->dev->dev_addr[4] +
2606                      tp->dev->dev_addr[5]) &
2607                 TX_BACKOFF_SEED_MASK;
2608         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2609 }
2610
/* Restore TG3PCI_MISC_HOST_CTRL from the cached tp->misc_host_ctrl
 * value.  Called before touching chip registers around power-state
 * transitions (see tg3_power_up()/tg3_power_down_prepare()).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2620
2621 static int tg3_power_up(struct tg3 *tp)
2622 {
2623         tg3_enable_register_access(tp);
2624
2625         pci_set_power_state(tp->pdev, PCI_D0);
2626
2627         /* Switch out of Vaux if it is a NIC */
2628         if (tg3_flag(tp, IS_NIC))
2629                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2630
2631         return 0;
2632 }
2633
/* Prepare the chip for entry into a low-power state: mask PCI
 * interrupts, push the PHY into its low-power/WoL configuration,
 * arm the Wake-on-LAN mailbox and MAC, and gate the core clocks.
 * Always returns 0.  The actual D3 transition is done by the caller
 * (see tg3_power_down()).
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: save the current link settings and
		 * renegotiate down to the minimal advertisement needed
		 * for WoL/ASF.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Remember the settings to restore on power-up. */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families get the extra
			 * low-power programming in the wake path below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Driver-managed PHY: save settings and force 10/half. */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox to signal
		 * readiness before arming WoL.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC to keep receiving (for magic packets)
		 * while the rest of the chip sleeps.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate core clocks as far as the chip family allows. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch; the register writes must be
		 * spaced apart (tw32_wait_f delays 40us each).
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Fully power down the PHY only when nothing needs it awake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually obtained.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2879
/* Power the device down: run the chip-level preparation, then let
 * the PCI core arm PME (if WoL is enabled) and enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
2887
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 {
2890         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891         case MII_TG3_AUX_STAT_10HALF:
2892                 *speed = SPEED_10;
2893                 *duplex = DUPLEX_HALF;
2894                 break;
2895
2896         case MII_TG3_AUX_STAT_10FULL:
2897                 *speed = SPEED_10;
2898                 *duplex = DUPLEX_FULL;
2899                 break;
2900
2901         case MII_TG3_AUX_STAT_100HALF:
2902                 *speed = SPEED_100;
2903                 *duplex = DUPLEX_HALF;
2904                 break;
2905
2906         case MII_TG3_AUX_STAT_100FULL:
2907                 *speed = SPEED_100;
2908                 *duplex = DUPLEX_FULL;
2909                 break;
2910
2911         case MII_TG3_AUX_STAT_1000HALF:
2912                 *speed = SPEED_1000;
2913                 *duplex = DUPLEX_HALF;
2914                 break;
2915
2916         case MII_TG3_AUX_STAT_1000FULL:
2917                 *speed = SPEED_1000;
2918                 *duplex = DUPLEX_FULL;
2919                 break;
2920
2921         default:
2922                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2924                                  SPEED_10;
2925                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2926                                   DUPLEX_HALF;
2927                         break;
2928                 }
2929                 *speed = SPEED_INVALID;
2930                 *duplex = DUPLEX_INVALID;
2931                 break;
2932         }
2933 }
2934
/* Program the PHY autonegotiation advertisement registers.
 *
 * @advertise: ethtool ADVERTISED_* media bits to offer.
 * @flowctrl:  FLOW_CTRL_TX/RX bits, folded into the pause bits.
 *
 * Returns 0 on success or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Translate ADVERTISED_* bits into the MII 10/100
	 * advertisement word (MII_ADVERTISE).
	 */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	/* 10/100-only PHYs have no gigabit control register. */
	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Gigabit advertisement (MII_TG3_CTRL). */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= MII_TG3_CTRL_ADV_1000_FULL;

	/* On 5701 A0/B0 also request master mode. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= (MII_TG3_CTRL_AS_MASTER |
			    MII_TG3_CTRL_ENABLE_AS_MASTER);

	err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* EEE-capable PHY: disable LPI while reconfiguring. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		/* Per-ASIC DSP adjustments before the EEE advertisement.
		 * 5717/57765 intentionally fall through into the 5719 case.
		 */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		/* Always undo the SMDSP enable; report the first error. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3015
/* Begin link bring-up on a copper PHY: program the advertisement
 * set appropriate for the current mode (low-power, autoneg, or a
 * forced speed/duplex), then either force BMCR or restart autoneg.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power (WoL) mode: advertise 10Mb only, plus
		 * 100Mb when WOL_SPEED_100MB is configured.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured set, with
		 * gigabit stripped for 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the BMCR word for the forced speed/duplex. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop into loopback and wait (up to ~15ms) for
			 * link to go down before applying the new BMCR.
			 * BMSR is read twice because its link-status bit
			 * is latched.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3109
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 {
3112         int err;
3113
3114         /* Turn off tap power management. */
3115         /* Set Extended packet length bit */
3116         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117
3118         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123
3124         udelay(40);
3125
3126         return err;
3127 }
3128
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 {
3131         u32 adv_reg, all_mask = 0;
3132
3133         if (mask & ADVERTISED_10baseT_Half)
3134                 all_mask |= ADVERTISE_10HALF;
3135         if (mask & ADVERTISED_10baseT_Full)
3136                 all_mask |= ADVERTISE_10FULL;
3137         if (mask & ADVERTISED_100baseT_Half)
3138                 all_mask |= ADVERTISE_100HALF;
3139         if (mask & ADVERTISED_100baseT_Full)
3140                 all_mask |= ADVERTISE_100FULL;
3141
3142         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3143                 return 0;
3144
3145         if ((adv_reg & all_mask) != all_mask)
3146                 return 0;
3147         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3148                 u32 tg3_ctrl;
3149
3150                 all_mask = 0;
3151                 if (mask & ADVERTISED_1000baseT_Half)
3152                         all_mask |= ADVERTISE_1000HALF;
3153                 if (mask & ADVERTISED_1000baseT_Full)
3154                         all_mask |= ADVERTISE_1000FULL;
3155
3156                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157                         return 0;
3158
3159                 if ((tg3_ctrl & all_mask) != all_mask)
3160                         return 0;
3161         }
3162         return 1;
3163 }
3164
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 {
3167         u32 curadv, reqadv;
3168
3169         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170                 return 1;
3171
3172         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174
3175         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176                 if (curadv != reqadv)
3177                         return 0;
3178
3179                 if (tg3_flag(tp, PAUSE_AUTONEG))
3180                         tg3_readphy(tp, MII_LPA, rmtadv);
3181         } else {
3182                 /* Reprogram the advertisement register, even if it
3183                  * does not affect the current link.  If the link
3184                  * gets renegotiated in the future, we can save an
3185                  * additional renegotiation cycle by advertising
3186                  * it correctly in the first place.
3187                  */
3188                 if (curadv != reqadv) {
3189                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190                                      ADVERTISE_PAUSE_ASYM);
3191                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192                 }
3193         }
3194
3195         return 1;
3196 }
3197
/* tg3_setup_copper_phy() - (re)establish the link on a copper PHY.
 * @tp:          driver/device state
 * @force_reset: non-zero forces a PHY reset before link evaluation
 *
 * Quiesces MAC event/status reporting, applies per-chip PHY workarounds,
 * polls BMSR/AUX_STAT/BMCR to determine link, speed and duplex, resolves
 * flow control, then programs MAC_MODE and the net-device carrier state
 * to match.  Returns 0 on success or a negative error from the BCM5401
 * DSP initialization path.
 */
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 {
3200         int current_link_up;
3201         u32 bmsr, val;
3202         u32 lcl_adv, rmt_adv;
3203         u16 current_speed;
3204         u8 current_duplex;
3205         int i, err;
3206
             /* Mask MAC events and clear latched link-state status bits
              * while the link is re-evaluated. */
3207         tw32(MAC_EVENT, 0);
3208
3209         tw32_f(MAC_STATUS,
3210              (MAC_STATUS_SYNC_CHANGED |
3211               MAC_STATUS_CFG_CHANGED |
3212               MAC_STATUS_MI_COMPLETION |
3213               MAC_STATUS_LNKSTATE_CHANGED));
3214         udelay(40);
3215
             /* Suspend hardware MII auto-polling so the manual
              * tg3_readphy()/tg3_writephy() accesses below own the bus. */
3216         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3217                 tw32_f(MAC_MI_MODE,
3218                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3219                 udelay(80);
3220         }
3221
             /* Clear the AUXCTL power-control shadow register. */
3222         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223
3224         /* Some third-party PHYs need to be reset on link going
3225          * down.
3226          */
3227         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230             netif_carrier_ok(tp->dev)) {
                 /* BMSR is read twice: the link-status bit is latched, so
                  * the first read flushes a stale link-down indication. */
3231                 tg3_readphy(tp, MII_BMSR, &bmsr);
3232                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233                     !(bmsr & BMSR_LSTATUS))
3234                         force_reset = 1;
3235         }
3236         if (force_reset)
3237                 tg3_phy_reset(tp);
3238
             /* BCM5401: if the link is down, (re)load the DSP patch and
              * wait up to ~10ms for link; rev B0 at 1000Mbps may need a
              * full PHY reset plus another DSP init pass. */
3239         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240                 tg3_readphy(tp, MII_BMSR, &bmsr);
3241                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242                     !tg3_flag(tp, INIT_COMPLETE))
3243                         bmsr = 0;
3244
3245                 if (!(bmsr & BMSR_LSTATUS)) {
3246                         err = tg3_init_5401phy_dsp(tp);
3247                         if (err)
3248                                 return err;
3249
3250                         tg3_readphy(tp, MII_BMSR, &bmsr);
3251                         for (i = 0; i < 1000; i++) {
3252                                 udelay(10);
3253                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254                                     (bmsr & BMSR_LSTATUS)) {
3255                                         udelay(40);
3256                                         break;
3257                                 }
3258                         }
3259
3260                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261                             TG3_PHY_REV_BCM5401_B0 &&
3262                             !(bmsr & BMSR_LSTATUS) &&
3263                             tp->link_config.active_speed == SPEED_1000) {
3264                                 err = tg3_phy_reset(tp);
3265                                 if (!err)
3266                                         err = tg3_init_5401phy_dsp(tp);
3267                                 if (err)
3268                                         return err;
3269                         }
3270                 }
3271         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273                 /* 5701 {A0,B0} CRC bug workaround */
3274                 tg3_writephy(tp, 0x15, 0x0a75);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278         }
3279
3280         /* Clear pending interrupts... */
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283
             /* Unmask only the link-change interrupt when MI interrupts
              * are in use; otherwise mask everything (non-FET PHYs). */
3284         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)3286         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288
             /* 5700/5701: select the PHY LED mode to match led_ctrl. */
3289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3294                 else
3295                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296         }
3297
3298         current_link_up = 0;
3299         current_speed = SPEED_INVALID;
3300         current_duplex = DUPLEX_INVALID;
3301
             /* Capacitively-coupled links: make sure MISC_TEST bit 10 is
              * set; if we had to set it, restart negotiation from scratch. */
3302         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303                 err = tg3_phy_auxctl_read(tp,
3304                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3305                                           &val);
3306                 if (!err && !(val & (1 << 10))) {
3307                         tg3_phy_auxctl_write(tp,
3308                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309                                              val | (1 << 10));
3310                         goto relink;
3311                 }
3312         }
3313
             /* Poll up to ~4ms for link; the double read flushes the
              * latched BMSR link-status bit. */
3314         bmsr = 0;
3315         for (i = 0; i < 100; i++) {
3316                 tg3_readphy(tp, MII_BMSR, &bmsr);
3317                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318                     (bmsr & BMSR_LSTATUS))
3319                         break;
3320                 udelay(40);
3321         }
3322
3323         if (bmsr & BMSR_LSTATUS) {
3324                 u32 aux_stat, bmcr;
3325
                     /* Wait (up to 20ms) for a non-zero aux status before
                      * decoding speed/duplex from it. */
3326                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327                 for (i = 0; i < 2000; i++) {
3328                         udelay(10);
3329                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3330                             aux_stat)
3331                                 break;
3332                 }
3333
3334                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3335                                              &current_speed,
3336                                              &current_duplex);
3337
                     /* Re-read BMCR until it looks sane; 0x7fff appears to
                      * be a failed-read pattern -- NOTE(review): confirm. */
3338                 bmcr = 0;
3339                 for (i = 0; i < 200; i++) {
3340                         tg3_readphy(tp, MII_BMCR, &bmcr);
3341                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3342                                 continue;
3343                         if (bmcr && bmcr != 0x7fff)
3344                                 break;
3345                         udelay(10);
3346                 }
3347
3348                 lcl_adv = 0;
3349                 rmt_adv = 0;
3350
3351                 tp->link_config.active_speed = current_speed;
3352                 tp->link_config.active_duplex = current_duplex;
3353
                     /* Autoneg: the link only counts as up when everything
                      * requested is being advertised and the 1000T pause
                      * advertisement is consistent.  Forced mode: it counts
                      * when the result matches the forced config exactly. */
3354                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355                         if ((bmcr & BMCR_ANENABLE) &&
3356                             tg3_copper_is_advertising_all(tp,
3357                                                 tp->link_config.advertising)) {
3358                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3359                                                                   &rmt_adv))
3360                                         current_link_up = 1;
3361                         }
3362                 } else {
3363                         if (!(bmcr & BMCR_ANENABLE) &&
3364                             tp->link_config.speed == current_speed &&
3365                             tp->link_config.duplex == current_duplex &&
3366                             tp->link_config.flowctrl ==
3367                             tp->link_config.active_flowctrl) {
3368                                 current_link_up = 1;
3369                         }
3370                 }
3371
3372                 if (current_link_up == 1 &&
3373                     tp->link_config.active_duplex == DUPLEX_FULL)
3374                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375         }
3376
3377 relink:
             /* No usable link (or low-power): restart negotiation and
              * re-check once; internal loopback always counts as up. */
3378         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379                 tg3_phy_copper_begin(tp);
3380
3381                 tg3_readphy(tp, MII_BMSR, &bmsr);
3382                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384                         current_link_up = 1;
3385         }
3386
             /* Select the MAC port mode (MII vs GMII) for the resolved
              * speed, or a per-PHY default when there is no link. */
3387         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388         if (current_link_up == 1) {
3389                 if (tp->link_config.active_speed == SPEED_100 ||
3390                     tp->link_config.active_speed == SPEED_10)
3391                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3392                 else
3393                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396         else
3397                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398
3399         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400         if (tp->link_config.active_duplex == DUPLEX_HALF)
3401                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402
3403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404                 if (current_link_up == 1 &&
3405                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3407                 else
3408                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409         }
3410
3411         /* ??? Without this setting Netgear GA302T PHY does not
3412          * ??? send/receive packets...
3413          */
3414         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3418                 udelay(80);
3419         }
3420
3421         tw32_f(MAC_MODE, tp->mac_mode);
3422         udelay(40);
3423
3424         tg3_phy_eee_adjust(tp, current_link_up);
3425
             /* Re-enable link-change notification, either polled via the
              * link-change register or via MAC events. */
3426         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427                 /* Polled via timer. */
3428                 tw32_f(MAC_EVENT, 0);
3429         } else {
3430                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431         }
3432         udelay(40);
3433
             /* 5700 on PCI-X / high-speed PCI at gigabit: clear latched
              * status bits and hand the firmware a magic mailbox value. */
3434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435             current_link_up == 1 &&
3436             tp->link_config.active_speed == SPEED_1000 &&
3437             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438                 udelay(120);
3439                 tw32_f(MAC_STATUS,
3440                      (MAC_STATUS_SYNC_CHANGED |
3441                       MAC_STATUS_CFG_CHANGED));
3442                 udelay(40);
3443                 tg3_write_mem(tp,
3444                               NIC_SRAM_FIRMWARE_MBOX,
3445                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446         }
3447
3448         /* Prevent send BD corruption. */
3449         if (tg3_flag(tp, CLKREQ_BUG)) {
3450                 u16 oldlnkctl, newlnkctl;
3451
                     /* CLKREQ must be off at 10/100 and on otherwise; only
                      * touch PCIe config space when the bit changes. */
3452                 pci_read_config_word(tp->pdev,
3453                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3454                                      &oldlnkctl);
3455                 if (tp->link_config.active_speed == SPEED_100 ||
3456                     tp->link_config.active_speed == SPEED_10)
3457                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3458                 else
3459                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460                 if (newlnkctl != oldlnkctl)
3461                         pci_write_config_word(tp->pdev,
3462                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3463                                               newlnkctl);
3464         }
3465
             /* Propagate any link transition to the network stack. */
3466         if (current_link_up != netif_carrier_ok(tp->dev)) {
3467                 if (current_link_up)
3468                         netif_carrier_on(tp->dev);
3469                 else
3470                         netif_carrier_off(tp->dev);
3471                 tg3_link_report(tp);
3472         }
3473
3474         return 0;
3475 }
3476
/* Software autonegotiation state for fiber links, driven one tick at a
 * time by tg3_fiber_aneg_smachine(): current state-machine state, MR_*
 * management/status flags, tick timestamps, received-config match
 * tracking, and the raw tx/rx config words exchanged on the wire. */
3477 struct tg3_fiber_aneginfo {
         /* Current state of the negotiation state machine. */
3478         int state;
3479 #define ANEG_STATE_UNKNOWN              0
3480 #define ANEG_STATE_AN_ENABLE            1
3481 #define ANEG_STATE_RESTART_INIT         2
3482 #define ANEG_STATE_RESTART              3
3483 #define ANEG_STATE_DISABLE_LINK_OK      4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3485 #define ANEG_STATE_ABILITY_DETECT       6
3486 #define ANEG_STATE_ACK_DETECT_INIT      7
3487 #define ANEG_STATE_ACK_DETECT           8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3489 #define ANEG_STATE_COMPLETE_ACK         10
3490 #define ANEG_STATE_IDLE_DETECT_INIT     11
3491 #define ANEG_STATE_IDLE_DETECT          12
3492 #define ANEG_STATE_LINK_OK              13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3495
         /* MR_* management/status flags; MR_LP_ADV_* mirror the link
          * partner's advertised abilities decoded from rxconfig. */
3496         u32 flags;
3497 #define MR_AN_ENABLE            0x00000001
3498 #define MR_RESTART_AN           0x00000002
3499 #define MR_AN_COMPLETE          0x00000004
3500 #define MR_PAGE_RX              0x00000008
3501 #define MR_NP_LOADED            0x00000010
3502 #define MR_TOGGLE_TX            0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3510 #define MR_TOGGLE_RX            0x00002000
3511 #define MR_NP_RX                0x00004000
3512
3513 #define MR_LINK_OK              0x80000000
3514
         /* Tick counters maintained by the state machine: time of the
          * last state transition and the current tick count. */
3515         unsigned long link_time, cur_time;
3516
         /* Last received config word and how many consecutive ticks it
          * has been seen unchanged (ability matching). */
3517         u32 ability_match_cfg;
3518         int ability_match_count;
3519
3520         char ability_match, idle_match, ack_match;
3521
         /* Raw config words sent/received on the wire; ANEG_CFG_* gives
          * the bit layout. */
3522         u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP             0x00000080
3524 #define ANEG_CFG_ACK            0x00000040
3525 #define ANEG_CFG_RF2            0x00000020
3526 #define ANEG_CFG_RF1            0x00000010
3527 #define ANEG_CFG_PS2            0x00000001
3528 #define ANEG_CFG_PS1            0x00008000
3529 #define ANEG_CFG_HD             0x00004000
3530 #define ANEG_CFG_FD             0x00002000
3531 #define ANEG_CFG_INVAL          0x00001f06
3532
3533 };
     /* Return codes of tg3_fiber_aneg_smachine(). */
3534 #define ANEG_OK         0
3535 #define ANEG_DONE       1
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED     -1
3538
     /* Ticks to wait in the RESTART/COMPLETE_ACK/IDLE_DETECT states. */
3539 #define ANEG_STATE_SETTLE_TIME  10000
3540
/* tg3_fiber_aneg_smachine() - advance the software fiber autoneg state
 * machine by one tick.
 * @tp: device state
 * @ap: negotiation state; state == ANEG_STATE_UNKNOWN resets it
 *
 * Samples the received config word from the MAC, updates the
 * ability/ack/idle match tracking, then executes one step of the state
 * machine.  Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while
 * waiting out a settle time, ANEG_DONE on completion, or ANEG_FAILED.
 */
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542                                    struct tg3_fiber_aneginfo *ap)
3543 {
3544         u16 flowctrl;
3545         unsigned long delta;
3546         u32 rx_cfg_reg;
3547         int ret;
3548
3549         if (ap->state == ANEG_STATE_UNKNOWN) {
3550                 ap->rxconfig = 0;
3551                 ap->link_time = 0;
3552                 ap->cur_time = 0;
3553                 ap->ability_match_cfg = 0;
3554                 ap->ability_match_count = 0;
3555                 ap->ability_match = 0;
3556                 ap->idle_match = 0;
3557                 ap->ack_match = 0;
3558         }
3559         ap->cur_time++;
3560
             /* Sample the incoming config word; the same value must be
              * seen on more than one consecutive tick before it counts
              * as an ability match.  No RCVD_CFG means idle. */
3561         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563
3564                 if (rx_cfg_reg != ap->ability_match_cfg) {
3565                         ap->ability_match_cfg = rx_cfg_reg;
3566                         ap->ability_match = 0;
3567                         ap->ability_match_count = 0;
3568                 } else {
3569                         if (++ap->ability_match_count > 1) {
3570                                 ap->ability_match = 1;
3571                                 ap->ability_match_cfg = rx_cfg_reg;
3572                         }
3573                 }
3574                 if (rx_cfg_reg & ANEG_CFG_ACK)
3575                         ap->ack_match = 1;
3576                 else
3577                         ap->ack_match = 0;
3578
3579                 ap->idle_match = 0;
3580         } else {
3581                 ap->idle_match = 1;
3582                 ap->ability_match_cfg = 0;
3583                 ap->ability_match_count = 0;
3584                 ap->ability_match = 0;
3585                 ap->ack_match = 0;
3586
3587                 rx_cfg_reg = 0;
3588         }
3589
3590         ap->rxconfig = rx_cfg_reg;
3591         ret = ANEG_OK;
3592
3593         switch (ap->state) {
3594         case ANEG_STATE_UNKNOWN:
3595                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596                         ap->state = ANEG_STATE_AN_ENABLE;
3597
3598                 /* fallthru */
3599         case ANEG_STATE_AN_ENABLE:
3600                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601                 if (ap->flags & MR_AN_ENABLE) {
3602                         ap->link_time = 0;
3603                         ap->cur_time = 0;
3604                         ap->ability_match_cfg = 0;
3605                         ap->ability_match_count = 0;
3606                         ap->ability_match = 0;
3607                         ap->idle_match = 0;
3608                         ap->ack_match = 0;
3609
3610                         ap->state = ANEG_STATE_RESTART_INIT;
3611                 } else {
3612                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613                 }
3614                 break;
3615
             /* Restart: transmit an all-zero config word for a settle
              * period before advertising abilities. */
3616         case ANEG_STATE_RESTART_INIT:
3617                 ap->link_time = ap->cur_time;
3618                 ap->flags &= ~(MR_NP_LOADED);
3619                 ap->txconfig = 0;
3620                 tw32(MAC_TX_AUTO_NEG, 0);
3621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622                 tw32_f(MAC_MODE, tp->mac_mode);
3623                 udelay(40);
3624
3625                 ret = ANEG_TIMER_ENAB;
3626                 ap->state = ANEG_STATE_RESTART;
3627
3628                 /* fallthru */
3629         case ANEG_STATE_RESTART:
3630                 delta = ap->cur_time - ap->link_time;
3631                 if (delta > ANEG_STATE_SETTLE_TIME)
3632                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3633                 else
3634                         ret = ANEG_TIMER_ENAB;
3635                 break;
3636
3637         case ANEG_STATE_DISABLE_LINK_OK:
3638                 ret = ANEG_DONE;
3639                 break;
3640
             /* Advertise full duplex plus the configured pause bits. */
3641         case ANEG_STATE_ABILITY_DETECT_INIT:
3642                 ap->flags &= ~(MR_TOGGLE_TX);
3643                 ap->txconfig = ANEG_CFG_FD;
3644                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645                 if (flowctrl & ADVERTISE_1000XPAUSE)
3646                         ap->txconfig |= ANEG_CFG_PS1;
3647                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648                         ap->txconfig |= ANEG_CFG_PS2;
3649                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651                 tw32_f(MAC_MODE, tp->mac_mode);
3652                 udelay(40);
3653
3654                 ap->state = ANEG_STATE_ABILITY_DETECT;
3655                 break;
3656
3657         case ANEG_STATE_ABILITY_DETECT:
3658                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660                 break;
3661
             /* Partner seen: set the ACK bit in our config word. */
3662         case ANEG_STATE_ACK_DETECT_INIT:
3663                 ap->txconfig |= ANEG_CFG_ACK;
3664                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666                 tw32_f(MAC_MODE, tp->mac_mode);
3667                 udelay(40);
3668
3669                 ap->state = ANEG_STATE_ACK_DETECT;
3670
3671                 /* fallthru */
3672         case ANEG_STATE_ACK_DETECT:
3673                 if (ap->ack_match != 0) {
3674                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3677                         } else {
3678                                 ap->state = ANEG_STATE_AN_ENABLE;
3679                         }
3680                 } else if (ap->ability_match != 0 &&
3681                            ap->rxconfig == 0) {
3682                         ap->state = ANEG_STATE_AN_ENABLE;
3683                 }
3684                 break;
3685
             /* Decode the link partner's abilities out of the acked
              * config word into the MR_LP_ADV_* flags. */
3686         case ANEG_STATE_COMPLETE_ACK_INIT:
3687                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3688                         ret = ANEG_FAILED;
3689                         break;
3690                 }
3691                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692                                MR_LP_ADV_HALF_DUPLEX |
3693                                MR_LP_ADV_SYM_PAUSE |
3694                                MR_LP_ADV_ASYM_PAUSE |
3695                                MR_LP_ADV_REMOTE_FAULT1 |
3696                                MR_LP_ADV_REMOTE_FAULT2 |
3697                                MR_LP_ADV_NEXT_PAGE |
3698                                MR_TOGGLE_RX |
3699                                MR_NP_RX);
3700                 if (ap->rxconfig & ANEG_CFG_FD)
3701                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702                 if (ap->rxconfig & ANEG_CFG_HD)
3703                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704                 if (ap->rxconfig & ANEG_CFG_PS1)
3705                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706                 if (ap->rxconfig & ANEG_CFG_PS2)
3707                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708                 if (ap->rxconfig & ANEG_CFG_RF1)
3709                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710                 if (ap->rxconfig & ANEG_CFG_RF2)
3711                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712                 if (ap->rxconfig & ANEG_CFG_NP)
3713                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714
3715                 ap->link_time = ap->cur_time;
3716
3717                 ap->flags ^= (MR_TOGGLE_TX);
3718                 if (ap->rxconfig & 0x0008)
3719                         ap->flags |= MR_TOGGLE_RX;
3720                 if (ap->rxconfig & ANEG_CFG_NP)
3721                         ap->flags |= MR_NP_RX;
3722                 ap->flags |= MR_PAGE_RX;
3723
3724                 ap->state = ANEG_STATE_COMPLETE_ACK;
3725                 ret = ANEG_TIMER_ENAB;
3726                 break;
3727
3728         case ANEG_STATE_COMPLETE_ACK:
                 /* Partner went back to advertising zero: restart. */
3729                 if (ap->ability_match != 0 &&
3730                     ap->rxconfig == 0) {
3731                         ap->state = ANEG_STATE_AN_ENABLE;
3732                         break;
3733                 }
3734                 delta = ap->cur_time - ap->link_time;
3735                 if (delta > ANEG_STATE_SETTLE_TIME) {
3736                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3738                         } else {
3739                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740                                     !(ap->flags & MR_NP_RX)) {
3741                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742                                 } else {
                                         /* Next-page exchange is not
                                          * implemented (see below). */
3743                                         ret = ANEG_FAILED;
3744                                 }
3745                         }
3746                 }
3747                 break;
3748
             /* Stop sending config words and wait for the link to idle. */
3749         case ANEG_STATE_IDLE_DETECT_INIT:
3750                 ap->link_time = ap->cur_time;
3751                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752                 tw32_f(MAC_MODE, tp->mac_mode);
3753                 udelay(40);
3754
3755                 ap->state = ANEG_STATE_IDLE_DETECT;
3756                 ret = ANEG_TIMER_ENAB;
3757                 break;
3758
3759         case ANEG_STATE_IDLE_DETECT:
3760                 if (ap->ability_match != 0 &&
3761                     ap->rxconfig == 0) {
3762                         ap->state = ANEG_STATE_AN_ENABLE;
3763                         break;
3764                 }
3765                 delta = ap->cur_time - ap->link_time;
3766                 if (delta > ANEG_STATE_SETTLE_TIME) {
3767                         /* XXX another gem from the Broadcom driver :( */
3768                         ap->state = ANEG_STATE_LINK_OK;
3769                 }
3770                 break;
3771
3772         case ANEG_STATE_LINK_OK:
3773                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3774                 ret = ANEG_DONE;
3775                 break;
3776
3777         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778                 /* ??? unimplemented */
3779                 break;
3780
3781         case ANEG_STATE_NEXT_PAGE_WAIT:
3782                 /* ??? unimplemented */
3783                 break;
3784
3785         default:
3786                 ret = ANEG_FAILED;
3787                 break;
3788         }
3789
3790         return ret;
3791 }
3792
/* fiber_autoneg() - run the software fiber autoneg state machine to
 * completion, polling in ~1us ticks bounded at 195000 iterations.
 * @tp:      device state
 * @txflags: out - final transmitted config word
 * @rxflags: out - final MR_* flags (incl. link partner abilities)
 *
 * Returns 1 when negotiation finished (ANEG_DONE) with at least one of
 * the AN-complete / link-OK / LP-full-duplex flags set, else 0.
 */
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 {
3795         int res = 0;
3796         struct tg3_fiber_aneginfo aninfo;
3797         int status = ANEG_FAILED;
3798         unsigned int tick;
3799         u32 tmp;
3800
             /* Clear the outgoing config word, force GMII port mode,
              * then start transmitting config words. */
3801         tw32_f(MAC_TX_AUTO_NEG, 0);
3802
3803         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805         udelay(40);
3806
3807         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808         udelay(40);
3809
             /* Fresh state machine; ANEG_STATE_UNKNOWN makes the first
              * smachine call reinitialize everything. */
3810         memset(&aninfo, 0, sizeof(aninfo));
3811         aninfo.flags |= MR_AN_ENABLE;
3812         aninfo.state = ANEG_STATE_UNKNOWN;
3813         aninfo.cur_time = 0;
3814         tick = 0;
3815         while (++tick < 195000) {
3816                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817                 if (status == ANEG_DONE || status == ANEG_FAILED)
3818                         break;
3819
3820                 udelay(1);
3821         }
3822
             /* Stop sending config words regardless of the outcome. */
3823         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824         tw32_f(MAC_MODE, tp->mac_mode);
3825         udelay(40);
3826
3827         *txflags = aninfo.txconfig;
3828         *rxflags = aninfo.flags;
3829
3830         if (status == ANEG_DONE &&
3831             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832                              MR_LP_ADV_FULL_DUPLEX)))
3833                 res = 1;
3834
3835         return res;
3836 }
3837
/* tg3_init_bcm8002() - bring-up sequence for the BCM8002 SERDES PHY.
 * Runs on first-time init, or when the PCS already reports sync (i.e.
 * there is a link worth retraining); otherwise it is a no-op.  The raw
 * register writes below are an opaque vendor sequence -- the order and
 * delays are significant, do not reorder.
 */
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3839 {
3840         u32 mac_status = tr32(MAC_STATUS);
3841         int i;
3842
3843         /* Reset when initting first time or we have a link. */
3844         if (tg3_flag(tp, INIT_COMPLETE) &&
3845             !(mac_status & MAC_STATUS_PCS_SYNCED))
3846                 return;
3847
3848         /* Set PLL lock range. */
3849         tg3_writephy(tp, 0x16, 0x8007);
3850
3851         /* SW reset */
3852         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853
3854         /* Wait for reset to complete. */
3855         /* XXX schedule_timeout() ... */
3856         for (i = 0; i < 500; i++)
3857                 udelay(10);
3858
3859         /* Config mode; select PMA/Ch 1 regs. */
3860         tg3_writephy(tp, 0x10, 0x8411);
3861
3862         /* Enable auto-lock and comdet, select txclk for tx. */
3863         tg3_writephy(tp, 0x11, 0x0a10);
3864
3865         tg3_writephy(tp, 0x18, 0x00a0);
3866         tg3_writephy(tp, 0x16, 0x41ff);
3867
3868         /* Assert and deassert POR. */
3869         tg3_writephy(tp, 0x13, 0x0400);
3870         udelay(40);
3871         tg3_writephy(tp, 0x13, 0x0000);
3872
3873         tg3_writephy(tp, 0x11, 0x0a50);
3874         udelay(40);
3875         tg3_writephy(tp, 0x11, 0x0a10);
3876
3877         /* Wait for signal to stabilize */
3878         /* XXX schedule_timeout() ... */
3879         for (i = 0; i < 15000; i++)
3880                 udelay(10);
3881
3882         /* Deselect the channel register so we can read the PHYID
3883          * later.
3884          */
3885         tg3_writephy(tp, 0x10, 0x8011);
3886 }
3887
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 {
3890         u16 flowctrl;
3891         u32 sg_dig_ctrl, sg_dig_status;
3892         u32 serdes_cfg, expected_sg_dig_ctrl;
3893         int workaround, port_a;
3894         int current_link_up;
3895
3896         serdes_cfg = 0;
3897         expected_sg_dig_ctrl = 0;
3898         workaround = 0;
3899         port_a = 1;
3900         current_link_up = 0;
3901
3902         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3904                 workaround = 1;
3905                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906                         port_a = 0;
3907
3908                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909                 /* preserve bits 20-23 for voltage regulator */
3910                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911         }
3912
3913         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914
3915         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3917                         if (workaround) {
3918                                 u32 val = serdes_cfg;
3919
3920                                 if (port_a)
3921                                         val |= 0xc010000;
3922                                 else
3923                                         val |= 0x4010000;
3924                                 tw32_f(MAC_SERDES_CFG, val);
3925                         }
3926
3927                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928                 }
3929                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930                         tg3_setup_flow_control(tp, 0, 0);
3931                         current_link_up = 1;
3932                 }
3933                 goto out;
3934         }
3935
3936         /* Want auto-negotiation.  */
3937         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938
3939         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940         if (flowctrl & ADVERTISE_1000XPAUSE)
3941                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944
3945         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947                     tp->serdes_counter &&
3948                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949                                     MAC_STATUS_RCVD_CFG)) ==
3950                      MAC_STATUS_PCS_SYNCED)) {
3951                         tp->serdes_counter--;
3952                         current_link_up = 1;
3953                         goto out;
3954                 }
3955 restart_autoneg:
3956                 if (workaround)
3957                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3959                 udelay(5);
3960                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961
3962                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965                                  MAC_STATUS_SIGNAL_DET)) {
3966                 sg_dig_status = tr32(SG_DIG_STATUS);
3967                 mac_status = tr32(MAC_STATUS);
3968
3969                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971                         u32 local_adv = 0, remote_adv = 0;
3972
3973                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974                                 local_adv |= ADVERTISE_1000XPAUSE;
3975                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977
3978                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979                                 remote_adv |= LPA_1000XPAUSE;
3980                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3982
3983                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3984                         current_link_up = 1;
3985                         tp->serdes_counter = 0;
3986                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988                         if (tp->serdes_counter)
3989                                 tp->serdes_counter--;
3990                         else {
3991                                 if (workaround) {
3992                                         u32 val = serdes_cfg;
3993
3994                                         if (port_a)
3995                                                 val |= 0xc010000;
3996                                         else
3997                                                 val |= 0x4010000;
3998
3999                                         tw32_f(MAC_SERDES_CFG, val);
4000                                 }
4001
4002                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003                                 udelay(40);
4004
4005                                 /* Link parallel detection - link is up */
4006                                 /* only if we have PCS_SYNC and not */
4007                                 /* receiving config code words */
4008                                 mac_status = tr32(MAC_STATUS);
4009                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011                                         tg3_setup_flow_control(tp, 0, 0);
4012                                         current_link_up = 1;
4013                                         tp->phy_flags |=
4014                                                 TG3_PHYFLG_PARALLEL_DETECT;
4015                                         tp->serdes_counter =
4016                                                 SERDES_PARALLEL_DET_TIMEOUT;
4017                                 } else
4018                                         goto restart_autoneg;
4019                         }
4020                 }
4021         } else {
4022                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024         }
4025
4026 out:
4027         return current_link_up;
4028 }
4029
/* Bring up a fiber (TBI) link by running autonegotiation in software.
 *
 * @mac_status: caller's snapshot of the MAC_STATUS register.
 *
 * Returns 1 if the link is up, 0 otherwise.  Nothing is attempted
 * unless the PCS has achieved sync.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal; report link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		/* Run the software autoneg state machine.  On success,
		 * translate the pause bits we sent (txflags) and received
		 * (rxflags) into MII-style advertisement words so the
		 * common flow-control resolution code can be reused.
		 */
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the latched sync/config-changed bits until they
		 * stay clear (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Even if autoneg did not complete, declare link up when
		 * we have PCS sync and the partner is not sending config
		 * code words (parallel-detect style link).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Autoneg disabled: no pause negotiation possible. */
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then return the MAC to
		 * its normal mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4091
/* Top-level link setup for fiber (TBI-mode) devices.
 *
 * @force_reset: accepted for signature parity with the other setup
 *               routines; not used in this path.
 *
 * Chooses between hardware and software autonegotiation, resolves the
 * link state, programs the LEDs, and reports carrier changes.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current link parameters so we can tell at the end
	 * whether anything worth reporting changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done, and
	 * MAC_STATUS shows a clean, synced link with no pending config
	 * words -- just ack the latched change bits and leave the link
	 * alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	/* Clear any stale autoneg TX state and put the MAC port into
	 * TBI (ten-bit interface) mode for the fiber SERDES.
	 */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-changed bit in the status block while keeping
	 * SD_STATUS_UPDATED set, so the stale link event is not
	 * re-processed.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the latched sync/config change bits until they stay clear
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * prod the link partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000 Mb/s full duplex when up; drive the
	 * link LED to match.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a flow-control/speed/duplex
	 * change on an otherwise unchanged carrier.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4199
/* Link setup for SERDES devices managed through an MII-style register
 * interface (TG3_PHYFLG_MII_SERDES parts, e.g. 5714-class).
 *
 * @force_reset: when nonzero, reset the PHY before configuring it.
 *
 * Returns the OR of the tg3_readphy() error results (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* Put the MAC port into GMII mode and ack all latched status. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the current
	 * state.  On 5714, trust the MAC's TX status for link instead of
	 * the BMSR bit.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000X advertisement word from the
		 * configured flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Only (re)start autoneg if the advertisement changed or
		 * autoneg is not currently enabled; then bail out and let
		 * the timer path pick up the result.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR with autoneg off
		 * and the configured duplex.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the partner drops the link before we
				 * switch to forced mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice; 5714 again uses
			 * the MAC TX status for link (as above).
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	/* Resolve speed/duplex from the final link state. */
	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Duplex comes from the common subset of the two
			 * advertisements.  No common 1000X ability on a
			 * non-5780-class part means the link came up via
			 * parallel detect; on 5780-class it is an error.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate carrier changes to the stack and the log. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4371
/* Periodic parallel-detection handler for MII-managed SERDES links.
 *
 * Called while autoneg is enabled.  If autoneg has stalled but signal
 * is present without config code words, force the link up (parallel
 * detect).  Conversely, if a parallel-detected link starts receiving
 * config code words, re-enable autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice: first read clears latched state.
			 * TODO confirm against PHY data sheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4431
/* Common PHY/link setup entry point.
 *
 * @force_reset: passed through to the PHY-type-specific setup routine.
 *
 * Dispatches to the fiber, MII-SERDES, or copper setup routine based
 * on tp->phy_flags, then applies chip-specific post-link tuning:
 * 5784_AX clock prescaler, MAC TX length/IPG timing, stats coalescing
 * ticks (pre-5705 parts only), and the ASPM L1 threshold workaround.
 * Returns the error code from the type-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Scale the GRC prescaler to the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS that must be
	 * preserved across the rewrite.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs the larger slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: only coalesce statistics while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the L1 entry threshold while the link
	 * is down, max it out while the link is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4496
/* Return the irq_sync flag; nonzero means interrupt processing is
 * being synchronized (quiesced) elsewhere and handlers should back off.
 * NOTE(review): exact irq_sync semantics are set outside this chunk --
 * confirm against tg3_full_lock/tg3_irq_quiesce.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4501
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 {
4504         int i;
4505
4506         dst = (u32 *)((u8 *)dst + off);
4507         for (i = 0; i < len; i += sizeof(u32))
4508                 *dst++ = tr32(off + i);
4509 }
4510
/* Dump the register blocks of a legacy (non-PCIe-direct-map) chip into
 * @regs, block by block, skipping regions that vary by capability:
 * per-MSIX-vector coalescing registers, TX CPU registers (absent on
 * 5705+), and NVRAM registers (only when NVRAM is present).
 * The offsets/lengths describe the Tigon3 register map.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4560
/* Log a diagnostic snapshot of the device: the register block (non-zero
 * groups of four words only) and, per NAPI vector, the hardware status
 * block and the driver's ring bookkeeping.  Best-effort: silently
 * returns (after one error message) if the dump buffer cannot be
 * allocated.  Uses GFP_ATOMIC since it may run from error paths.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep
	 * the log readable.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4618
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (or mailbox writes
	 * already go through the indirect path), getting here again
	 * means the recovery strategy cannot help -- treat as fatal.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4640
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 {
4643         /* Tell compiler to fetch tx indices from memory. */
4644         barrier();
4645         return tnapi->tx_pending -
4646                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 }
4648
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's consumer index from the status block vs. our
	 * software consumer index; everything in between is complete.
	 */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, vector 0 carries no tx ring, so tx queue numbering
	 * is shifted down by one relative to the napi index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware gave us
		 * a bogus completion -- trigger MMIO-reorder recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part, then each fragment, advancing
		 * one ring slot per mapping.
		 */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slot holding an skb, or running past the
			 * hardware index mid-packet, is another bogus
			 * completion symptom.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake a stopped queue once enough space has freed up; recheck
	 * under the tx lock to avoid racing with tg3_start_xmit().
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4723
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 {
4726         if (!ri->skb)
4727                 return;
4728
4729         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730                          map_sz, PCI_DMA_FROMDEVICE);
4731         dev_kfree_skb_any(ri->skb);
4732         ri->skb = NULL;
4733 }
4734
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, buffer-info slot, and DMA map size for
	 * the ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit: record the skb/mapping and hand the DMA address to
	 * the chip via the descriptor.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4801
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* The source is always napi[0]'s producer ring set — the rings
	 * the chip pulled the original buffer from.
	 */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move the skb pointer, DMA cookie, and bus address over to the
	 * destination slot.
	 */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
4851
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Returns the number of packets delivered to the stack (at most @budget).
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (and
		 * which slot in it) the buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the harmless odd-nibble
		 * MII indication), recycling the buffer back to the chip.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: post a fresh buffer in its place
			 * and pass the original buffer up the stack.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the original DMA buffer back to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip saw a
		 * valid TCP/UDP checksum (0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless the excess is a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Repost standard buffers periodically so the chip does
		 * not starve while we work through a long status ring.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Under RSS, napi[1] drains the per-vector rings back to
		 * napi[0]; kick it if we are some other vector.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5058
/* Handle link change and other phy events reported through the status
 * block.  Skipped entirely when link changes are detected via the
 * link-change register or serdes polling instead.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit while keeping the
			 * status block marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just clear
				 * the MAC attention conditions.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5082
/* Transfer recycled rx buffers from a per-vector producer ring set
 * @spr into the destination ring set @dpr (the one actually posted to
 * the chip).  Both the standard and jumbo rings are drained.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied so only a partial transfer was possible.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Drain the standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous entries available, accounting for
		 * wrap-around of the source ring.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding an skb;
		 * copy only up to that point and report -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the DMA addresses into the destination descriptors. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Drain the jumbo ring, same scheme as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5208
/* Per-vector NAPI work: reclaim completed tx packets, then receive rx
 * packets within the remaining budget.  On napi[1] under RSS, also
 * gather recycled buffers from every vector's producer rings back into
 * napi[0]'s rings and repost them to the chip.
 *
 * Returns the updated work_done count; if tx recovery is pending the
 * caller is expected to notice via the TX_RECOVERY_PENDING flag.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the transferred buffers visible before the mailbox
		 * writes tell the chip about them.
		 */
		wmb();

		/* Only kick the chip for rings whose producer index moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A partial transfer (-ENOSPC) leaves buffers behind;
		 * force a coalescence-now interrupt so we retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5255
/* NAPI poll handler for the MSI-X rx/tx vectors (tagged-status mode).
 * Loops doing tx/rx work until either the budget is exhausted or no
 * work remains, then completes NAPI and re-enables the interrupt by
 * writing the last processed status tag to the vector's mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5299
5300 static void tg3_process_error(struct tg3 *tp)
5301 {
5302         u32 val;
5303         bool real_error = false;
5304
5305         if (tg3_flag(tp, ERROR_PROCESSED))
5306                 return;
5307
5308         /* Check Flow Attention register */
5309         val = tr32(HOSTCC_FLOW_ATTN);
5310         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5312                 real_error = true;
5313         }
5314
5315         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5317                 real_error = true;
5318         }
5319
5320         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5322                 real_error = true;
5323         }
5324
5325         if (!real_error)
5326                 return;
5327
5328         tg3_dump_state(tp);
5329
5330         tg3_flag_set(tp, ERROR_PROCESSED);
5331         schedule_work(&tp->reset_task);
5332 }
5333
/* Default NAPI poll handler (vector 0, and single-vector devices).
 * Beyond the tx/rx work done via tg3_poll_work(), this vector also
 * handles chip error processing and link-change events, and supports
 * both tagged and non-tagged status-block acknowledgement.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Non-tagged mode: ack by clearing the updated bit. */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5381
5382 static void tg3_napi_disable(struct tg3 *tp)
5383 {
5384         int i;
5385
5386         for (i = tp->irq_cnt - 1; i >= 0; i--)
5387                 napi_disable(&tp->napi[i].napi);
5388 }
5389
5390 static void tg3_napi_enable(struct tg3 *tp)
5391 {
5392         int i;
5393
5394         for (i = 0; i < tp->irq_cnt; i++)
5395                 napi_enable(&tp->napi[i].napi);
5396 }
5397
5398 static void tg3_napi_init(struct tg3 *tp)
5399 {
5400         int i;
5401
5402         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403         for (i = 1; i < tp->irq_cnt; i++)
5404                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 }
5406
5407 static void tg3_napi_fini(struct tg3 *tp)
5408 {
5409         int i;
5410
5411         for (i = 0; i < tp->irq_cnt; i++)
5412                 netif_napi_del(&tp->napi[i].napi);
5413 }
5414
/* Quiesce the data path: refresh the tx timestamp so the watchdog does
 * not fire while we are stopped, disable NAPI polling, and stop all tx
 * queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5421
/* Restart the data path after tg3_netif_stop(): wake tx queues,
 * re-enable NAPI, and re-arm interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Force the status block to look updated so pending work gets
	 * picked up on the next poll.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5434
/* Stop the interrupt handlers from scheduling new work and wait for
 * any handler already running on any vector to finish.  irq_sync is
 * checked by the handlers (via tg3_irq_sync()) before scheduling NAPI;
 * the smp_mb() makes the store visible before we synchronize each irq.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5447
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Quiesce after taking the lock so no new irq work can start. */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5459
/* Counterpart to tg3_full_lock(); releases tp->lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5464
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling if tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5482
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484  * flush status block and interrupt mailbox. PCI ordering rules
5485  * guarantee that MSI will arrive after the status block.
5486  */
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 {
5489         struct tg3_napi *tnapi = dev_id;
5490         struct tg3 *tp = tnapi->tp;
5491
5492         prefetch(tnapi->hw_status);
5493         if (tnapi->rx_rcb)
5494                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495         /*
5496          * Writing any value to intr-mbox-0 clears PCI INTA# and
5497          * chip-internal interrupt pending events.
5498          * Writing non-zero to intr-mbox-0 additional tells the
5499          * NIC to stop sending us irqs, engaging "in-intr-handler"
5500          * event coalescing.
5501          */
5502         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503         if (likely(!tg3_irq_sync(tp)))
5504                 napi_schedule(&tnapi->napi);
5505
5506         return IRQ_RETVAL(1);
5507 }
5508
/* Legacy (possibly shared) INTx ISR for chips without tagged status
 * blocks.  Decides whether the interrupt is ours, acks/masks it via
 * the interrupt mailbox, and defers the real work to NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Ack the status block update before looking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5557
/* INTx ISR for chips with tagged status blocks.  status_tag changes
 * with each status block update, so comparing it against the tag
 * recorded at the previous IRQ tells whether this interrupt carries
 * new work - and lets screaming shared interrupts be reported as
 * unhandled.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5609
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 {
5613         struct tg3_napi *tnapi = dev_id;
5614         struct tg3 *tp = tnapi->tp;
5615         struct tg3_hw_status *sblk = tnapi->hw_status;
5616
5617         if ((sblk->status & SD_STATUS_UPDATED) ||
5618             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619                 tg3_disable_ints(tp);
5620                 return IRQ_RETVAL(1);
5621         }
5622         return IRQ_RETVAL(0);
5623 }
5624
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Init failed: shut the chip down and close the device.
		 * dev_close() runs with tp->lock dropped - hence the
		 * __releases/__acquires annotations above - and the
		 * timer/irq_sync/NAPI state is unwound first.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5651
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx ISR by hand for every vector so
 * pending work is still picked up when normal interrupt delivery
 * cannot be relied upon.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
5662
/* Process-context chip reset (scheduled from tg3_tx_timeout() and
 * other error paths): stop the data path, reinitialize the hardware,
 * and restart the interface.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface went down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* irq_sync=1: wait out in-flight ISRs before touching the chip. */
	tg3_full_lock(tp, 1);

	/* Remember whether the timer must be rearmed after the reset. */
	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Tx recovery: switch to flushed mailbox writes.
		 * NOTE(review): presumably a workaround for posted-write
		 * reordering on the affected chips - confirm against the
		 * MBOX_WRITE_REORDER handling elsewhere in this file.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if the reset actually succeeded. */
	if (!err)
		tg3_phy_start(tp);
}
5710
/* netdev watchdog callback: a tx queue stalled.  Log the chip state
 * (when tx_err messages are enabled) and defer the actual reset to
 * process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
5722
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 {
5726         u32 base = (u32) mapping & 0xffffffff;
5727
5728         return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 }
5730
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips flagged with 40BIT_DMA_BUG are affected, and only
	 * on configurations where DMA addresses can exceed 40 bits.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	/* Addresses cannot exceed 40 bits here; never overflows. */
	return 0;
#endif
}
5743
/* Fill one tx buffer descriptor at ring slot @entry.
 * @mss_and_is_end packs the MSS in bits 31:1 and the "last fragment
 * of this skb" flag in bit 0.  When @flags carries TXD_FLAG_VLAN,
 * its upper 16 bits hold the VLAN tag.
 */
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		/* Split the tag out of @flags; only the low 16 bits of
		 * flags belong in the len_flags word.
		 */
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	/* Write the 64-bit DMA address as two 32-bit halves. */
	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
5766
/* Undo the DMA mappings of a partially set-up tx skb: the linear
 * head mapping plus the first @last page fragments, walking the ring
 * starting at the slot tnapi->tx_prod currently points to (i.e. the
 * producer index has not been advanced past this skb yet).
 */
static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
				struct sk_buff *skb, int last)
{
	int i;
	u32 entry = tnapi->tx_prod;
	struct ring_info *txb = &tnapi->tx_buffers[entry];

	/* Slot 0 of the skb holds the linear (head) mapping. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i < last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Each fragment occupies the next ring slot. */
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}
}
5789
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792                                        struct sk_buff *skb,
5793                                        u32 base_flags, u32 mss)
5794 {
5795         struct tg3 *tp = tnapi->tp;
5796         struct sk_buff *new_skb;
5797         dma_addr_t new_addr = 0;
5798         u32 entry = tnapi->tx_prod;
5799         int ret = 0;
5800
5801         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802                 new_skb = skb_copy(skb, GFP_ATOMIC);
5803         else {
5804                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805
5806                 new_skb = skb_copy_expand(skb,
5807                                           skb_headroom(skb) + more_headroom,
5808                                           skb_tailroom(skb), GFP_ATOMIC);
5809         }
5810
5811         if (!new_skb) {
5812                 ret = -1;
5813         } else {
5814                 /* New SKB is guaranteed to be linear. */
5815                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5816                                           PCI_DMA_TODEVICE);
5817                 /* Make sure the mapping succeeded */
5818                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5819                         ret = -1;
5820                         dev_kfree_skb(new_skb);
5821
5822                 /* Make sure new skb does not cross any 4G boundaries.
5823                  * Drop the packet if it does.
5824                  */
5825                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5828                                          PCI_DMA_TODEVICE);
5829                         ret = -1;
5830                         dev_kfree