tg3: Fix EEE interoperability workaround
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Test whether @flag is set in the driver flag bitmap @bits.
 * Returns nonzero if set.  Use via the tg3_flag() macro below.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
71
/* Atomically set @flag in the driver flag bitmap @bits.
 * Use via the tg3_flag_set() macro below.
 */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
76
/* Atomically clear @flag in the driver flag bitmap @bits.
 * Use via the tg3_flag_clear() macro below.
 */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
81
/* Convenience wrappers: paste the short flag name onto TG3_FLAG_ and
 * operate on tp->tg3_flags, with type checking via the helpers above.
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     118
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "April 22, 2011"
95
96 #define TG3_DEF_MAC_MODE        0
97 #define TG3_DEF_RX_MODE         0
98 #define TG3_DEF_TX_MODE         0
99 #define TG3_DEF_MSG_ENABLE        \
100         (NETIF_MSG_DRV          | \
101          NETIF_MSG_PROBE        | \
102          NETIF_MSG_LINK         | \
103          NETIF_MSG_TIMER        | \
104          NETIF_MSG_IFDOWN       | \
105          NETIF_MSG_IFUP         | \
106          NETIF_MSG_RX_ERR       | \
107          NETIF_MSG_TX_ERR)
108
109 /* length of time before we decide the hardware is borked,
110  * and dev->tx_timeout() should be called to fix the problem
111  */
112
113 #define TG3_TX_TIMEOUT                  (5 * HZ)
114
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU                     60
117 #define TG3_MAX_MTU(tp) \
118         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
119
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121  * You can't change the ring sizes, but you can change where you place
122  * them in the NIC onboard memory.
123  */
124 #define TG3_RX_STD_RING_SIZE(tp) \
125         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING         200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
132 #define TG3_RSS_INDIR_TBL_SIZE          128
133
134 /* Do not place this n-ring entries value into the tp struct itself,
135  * we really want to expose these constants to GCC so that modulo et
136  * al.  operations are done with shifts and masks instead of with
137  * hw multiply/modulo instructions.  Another solution would be to
138  * replace things like '% foo' with '& (foo - 1)'.
139  */
140
141 #define TG3_TX_RING_SIZE                512
142 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
143
144 #define TG3_RX_STD_RING_BYTES(tp) \
145         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
151                                  TG3_TX_RING_SIZE)
152 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
153
154 #define TG3_DMA_BYTE_ENAB               64
155
156 #define TG3_RX_STD_DMA_SZ               1536
157 #define TG3_RX_JMB_DMA_SZ               9046
158
159 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
160
161 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
163
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
166
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
169
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171  * that are at least dword aligned when used in PCIX mode.  The driver
172  * works around this bug by double copying the packet.  This workaround
173  * is built into the normal double copy length check for efficiency.
174  *
175  * However, the double copy is only necessary on those architectures
176  * where unaligned memory accesses are inefficient.  For those architectures
177  * where unaligned memory accesses incur little penalty, we can reintegrate
178  * the 5701 in the normal rx path.  Doing so saves a device structure
179  * dereference by hardcoding the double copy threshold in place.
180  */
181 #define TG3_RX_COPY_THRESHOLD           256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
184 #else
185         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
186 #endif
187
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
190
191 #define TG3_RAW_IP_ALIGN 2
192
193 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
194
195 #define FIRMWARE_TG3            "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
198
/* Version banner, printed once at probe time. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Bitmap of NETIF_MSG_* message categories to enable. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
213
/* PCI IDs of all Tigon3 variants (plus compatible SysKonnect, Altima and
 * Apple parts) that this driver binds to.  Terminated by an empty entry.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
299
/* Names reported for `ethtool -S`.  Order must match the per-counter
 * layout the stats-gathering code uses; TG3_NUM_STATS below is derived
 * from this table.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "mbuf_lwm_thresh_hit" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
383
384
/* Names reported for `ethtool -t` self-tests; order must match the
 * order in which the self-test code runs and records results.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
397
398
/* Direct MMIO write of @val to chip register @off (posted; no flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
403
/* Direct MMIO read of chip register @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
408
/* MMIO write to the APE (Application Processing Engine) register space. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
413
/* MMIO read from the APE register space. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
418
/* Write chip register @off through the PCI config-space indirect access
 * pair (REG_BASE_ADDR selects the register, REG_DATA carries the value).
 * indirect_lock keeps the two config writes atomic w.r.t. other users.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
428
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
434
/* Read chip register @off through the PCI config-space indirect access
 * pair, under indirect_lock (see tg3_write_indirect_reg32()).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
446
/* Write mailbox register @off when register access is indirect (via PCI
 * config space).  Two mailboxes have dedicated config-space shadow
 * registers and are written directly; everything else goes through the
 * REG_BASE_ADDR/REG_DATA pair, with mailboxes living at offset 0x5600
 * in that indirect address space.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* Receive-return ring 0 consumer index has a config-space shadow. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* So does the standard RX producer index. */
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
476
/* Read mailbox register @off via the config-space indirect pair; the
 * 0x5600 offset maps mailbox space into the indirect register window
 * (see tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
488
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: flush the write with a read-back,
                 * optionally delaying before the read as described above.
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
512
/* Mailbox write followed by a read-back to flush the posted write.
 * The read-back is skipped on parts flagged MBOX_WRITE_REORDER or
 * ICH_WORKAROUND, which use a different write path.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
519
/* Write a TX mailbox.  Parts with the TXD_MBOX_HWBUG flag need the value
 * written twice; parts flagged MBOX_WRITE_REORDER need a read-back to
 * force ordering of the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}
529
/* 5906 mailbox read: mailboxes are reached through the GRC mailbox
 * aperture rather than at their usual offsets.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
534
/* 5906 mailbox write through the GRC mailbox aperture. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
539
/* Register/mailbox access shorthands.  All of these expect a local
 * variable named `tp` to be in scope at the call site; the _f variants
 * flush the posted write, and tw32_wait_f adds a delay (in usec).
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
550
/* Write @val into NIC-local SRAM at offset @off through the memory
 * window.  On 5906 the stats-block region is skipped entirely (hardware
 * quirk).  When SRAM_USE_CONFIG is set the window registers are driven
 * through PCI config space, otherwise through MMIO; either way the base
 * address register is restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
575
/* Read NIC-local SRAM at offset @off into *@val through the memory
 * window (mirror of tg3_write_mem()).  On 5906 the stats-block region
 * cannot be read; *@val is forced to 0 for it.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
602
603 static void tg3_ape_lock_init(struct tg3 *tp)
604 {
605         int i;
606         u32 regbase;
607
608         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
609                 regbase = TG3_APE_LOCK_GRANT;
610         else
611                 regbase = TG3_APE_PER_LOCK_GRANT;
612
613         /* Make sure the driver hasn't any stale locks. */
614         for (i = 0; i < 8; i++)
615                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
616 }
617
/* Acquire APE hardware lock @locknum (only GRC and MEM are supported).
 * Posts a request and polls the grant register for up to 1 ms.
 * Returns 0 on success, -EINVAL for an unsupported lock number, and
 * -EBUSY (after revoking the request) if the grant never arrives.
 * No-op returning 0 when the APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the older request/grant register layout. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
665
666 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
667 {
668         u32 gnt;
669
670         if (!tg3_flag(tp, ENABLE_APE))
671                 return;
672
673         switch (locknum) {
674         case TG3_APE_LOCK_GRC:
675         case TG3_APE_LOCK_MEM:
676                 break;
677         default:
678                 return;
679         }
680
681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
682                 gnt = TG3_APE_LOCK_GRANT;
683         else
684                 gnt = TG3_APE_PER_LOCK_GRANT;
685
686         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
687 }
688
/* Mask the PCI interrupt at the host-control level and write 1 to every
 * interrupt mailbox to disable each vector individually.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
698
/* Re-enable interrupts: unmask the PCI interrupt and write each vector's
 * last status tag to its mailbox (double-written on 1SHOT_MSI parts),
 * then kick the chip so no work completed while masked is lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        /* Publish irq_sync = 0 before touching hardware. */
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
729
730 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
731 {
732         struct tg3 *tp = tnapi->tp;
733         struct tg3_hw_status *sblk = tnapi->hw_status;
734         unsigned int work_exists = 0;
735
736         /* check for phy events */
737         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
738                 if (sblk->status & SD_STATUS_LINK_CHG)
739                         work_exists = 1;
740         }
741         /* check for RX/TX work to do */
742         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
743             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
744                 work_exists = 1;
745
746         return work_exists;
747 }
748
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        /* Ack processed work by writing the last seen status tag. */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
769
/* Switch the chip core clock.  No-op on CPMU-equipped and 5780-class
 * parts.  5705+ parts get the 625 MHz core clock forced on when already
 * selected; older parts step through an intermediate 44 MHz/ALTCLK
 * setting before the final value.  Each write uses a 40 usec settle
 * delay (tw32_wait_f) since clock changes can't be read back safely.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        /* Preserve only the CLKRUN bits and the low divider field. */
        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
802
803 #define PHY_BUSY_LOOPS  5000
804
/* Read PHY register @reg over the MAC's MII management interface.
 * MAC auto-polling is disabled for the duration of the transaction
 * and restored afterwards.  On success the 16-bit value is stored in
 * *@val and 0 is returned; -EBUSY is returned if the interface never
 * goes idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI frame: PHY address, register, read command. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears, then re-read for the data. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                /* Restore auto-polling if it was enabled on entry. */
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
853
/* Write @val to PHY register @reg over the MAC's MII management
 * interface.  On FET-style PHYs, writes to MII_TG3_CTRL and
 * MII_TG3_AUX_CTRL are dropped and reported as success (presumably
 * those registers do not exist on FET parts — see the guard below).
 * MAC auto-polling is disabled for the duration and restored
 * afterwards.  Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI frame: PHY address, register, data, write cmd. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll until the BUSY bit clears. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                /* Restore auto-polling if it was enabled on entry. */
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
902
903 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
904 {
905         int err;
906
907         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
908         if (err)
909                 goto done;
910
911         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
912         if (err)
913                 goto done;
914
915         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
916                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
917         if (err)
918                 goto done;
919
920         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
921
922 done:
923         return err;
924 }
925
926 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
927 {
928         int err;
929
930         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
931         if (err)
932                 goto done;
933
934         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
935         if (err)
936                 goto done;
937
938         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
939                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
940         if (err)
941                 goto done;
942
943         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
944
945 done:
946         return err;
947 }
948
949 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
950 {
951         int err;
952
953         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
954         if (!err)
955                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
956
957         return err;
958 }
959
960 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
961 {
962         int err;
963
964         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
965         if (!err)
966                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
967
968         return err;
969 }
970
971 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
972 {
973         int err;
974
975         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
976                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
977                            MII_TG3_AUXCTL_SHDWSEL_MISC);
978         if (!err)
979                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
980
981         return err;
982 }
983
984 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
985 {
986         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
987                 set |= MII_TG3_AUXCTL_MISC_WREN;
988
989         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
990 }
991
/* Enable/disable the PHY's DSP coprocessor via the auxiliary-control
 * shadow register.  Both macros expand to an expression returning the
 * tg3_phy_auxctl_write() status, so callers may check for errors.
 * (The DISABLE variant previously carried a stray trailing semicolon,
 * which made it unusable in expression context and broke single-
 * statement if/else bodies; removed.)
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
1000
1001 static int tg3_bmcr_reset(struct tg3 *tp)
1002 {
1003         u32 phy_control;
1004         int limit, err;
1005
1006         /* OK, reset it, and poll the BMCR_RESET bit until it
1007          * clears or we time out.
1008          */
1009         phy_control = BMCR_RESET;
1010         err = tg3_writephy(tp, MII_BMCR, phy_control);
1011         if (err != 0)
1012                 return -EBUSY;
1013
1014         limit = 5000;
1015         while (limit--) {
1016                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1017                 if (err != 0)
1018                         return -EBUSY;
1019
1020                 if ((phy_control & BMCR_RESET) == 0) {
1021                         udelay(40);
1022                         break;
1023                 }
1024                 udelay(10);
1025         }
1026         if (limit < 0)
1027                 return -EBUSY;
1028
1029         return 0;
1030 }
1031
1032 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1033 {
1034         struct tg3 *tp = bp->priv;
1035         u32 val;
1036
1037         spin_lock_bh(&tp->lock);
1038
1039         if (tg3_readphy(tp, reg, &val))
1040                 val = -EIO;
1041
1042         spin_unlock_bh(&tp->lock);
1043
1044         return val;
1045 }
1046
1047 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1048 {
1049         struct tg3 *tp = bp->priv;
1050         u32 ret = 0;
1051
1052         spin_lock_bh(&tp->lock);
1053
1054         if (tg3_writephy(tp, reg, val))
1055                 ret = -EIO;
1056
1057         spin_unlock_bh(&tp->lock);
1058
1059         return ret;
1060 }
1061
/* mii_bus reset hook; the tg3 MDIO bus needs no reset, so this is a
 * no-op that always reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
1066
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY's LED modes and, for
 * RGMII PHYs, its in-band/out-of-band status signalling configuration.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        /* Pick the LED-mode setting matching the PHY model. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                /* Unknown PHY model: leave the MAC untouched. */
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                /* Non-RGMII: only LED modes and clock timeouts apply. */
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with in-band signalling enabled needs the mode masks. */
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        /* Mirror the external in-band RX/TX enables into RGMII mode. */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1147
/* Disable MAC auto-polling so the MI interface can be driven directly,
 * then reapply the 5785 PHY-interface configuration if the MDIO bus
 * has already been brought up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
1158
/* Initialize the MDIO interface: determine the PHY address (5717+
 * derives it from the PCI function number and the SERDES strap), stop
 * auto-polling, and — when phylib is in use — allocate and register
 * an mii_bus, then apply per-PHY-model interface and dev_flags quirks.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                /* 5717 A0 reads the SERDES strap from CPMU instead. */
                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Apply model-specific interface mode and workaround flags. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1263
1264 static void tg3_mdio_fini(struct tg3 *tp)
1265 {
1266         if (tg3_flag(tp, MDIOBUS_INITED)) {
1267                 tg3_flag_clear(tp, MDIOBUS_INITED);
1268                 mdiobus_unregister(tp->mdio_bus);
1269                 mdiobus_free(tp->mdio_bus);
1270         }
1271 }
1272
/* tp->lock is held. */
/* Signal the firmware that a driver event is pending by raising the
 * driver-event bit in GRC_RX_CPU_EVENT, and record the time so the
 * next tg3_wait_for_event_ack() can shorten its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1284
1285 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1286
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * consume the previous driver event, i.e. for GRC_RX_CPU_DRIVER_EVENT
 * to clear.  The wait is shortened by however much time has already
 * elapsed since the event was generated.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;       /* number of 8us polls */

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1313
/* tp->lock is held. */
/* Copy the current MII link registers (BMCR/BMSR, ADVERTISE/LPA,
 * 1000T control/status, PHYADDR) into the NIC SRAM command mailboxes
 * and raise a FWCMD_NICDRV_LINK_UPDATE event so ASF firmware learns
 * the new link state.  Only 5780-class devices with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        /* Make sure the previous driver event has been consumed. */
        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Each data word packs two 16-bit MII registers (hi/lo). */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
1360
/* Log the current link state (speed/duplex, flow control, EEE) to the
 * kernel log, honoring the netif_msg link mask, and always forward
 * the state to ASF firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                /* setlpicnt nonzero indicates EEE was negotiated. */
                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}
1388
1389 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1390 {
1391         u16 miireg;
1392
1393         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1394                 miireg = ADVERTISE_PAUSE_CAP;
1395         else if (flow_ctrl & FLOW_CTRL_TX)
1396                 miireg = ADVERTISE_PAUSE_ASYM;
1397         else if (flow_ctrl & FLOW_CTRL_RX)
1398                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1399         else
1400                 miireg = 0;
1401
1402         return miireg;
1403 }
1404
1405 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1406 {
1407         u16 miireg;
1408
1409         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1410                 miireg = ADVERTISE_1000XPAUSE;
1411         else if (flow_ctrl & FLOW_CTRL_TX)
1412                 miireg = ADVERTISE_1000XPSE_ASYM;
1413         else if (flow_ctrl & FLOW_CTRL_RX)
1414                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1415         else
1416                 miireg = 0;
1417
1418         return miireg;
1419 }
1420
1421 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1422 {
1423         u8 cap = 0;
1424
1425         if (lcladv & ADVERTISE_1000XPAUSE) {
1426                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1427                         if (rmtadv & LPA_1000XPAUSE)
1428                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1429                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1430                                 cap = FLOW_CTRL_RX;
1431                 } else {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                 }
1435         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1436                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1437                         cap = FLOW_CTRL_TX;
1438         }
1439
1440         return cap;
1441 }
1442
/* Resolve the active flow-control configuration — from autoneg
 * results (@lcladv/@rmtadv) when pause autoneg is enabled, otherwise
 * from the forced settings — record it in active_flowctrl, and update
 * the MAC RX/TX pause-enable bits only when they actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Serdes links use the 1000BASE-X pause bits. */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Touch the hardware only if the mode actually changed. */
        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1481
/* phylib adjust_link callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, MI status attention, TX
 * slot timing) under tp->lock, then emit a link report outside the
 * lock if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* Select MII vs GMII port mode from the PHY speed;
                 * non-5785 parts default to GMII for other speeds.
                 */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: resolve pause from both sides. */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        /* 1000/half uses a longer slot time (0xff vs 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        /* Report outside the lock; tg3_link_report may sleep-log. */
        if (linkmesg)
                tg3_link_report(tp);
}
1565
/* Connect the MAC to its PHY through phylib: reset the PHY to a known
 * state, attach tg3_adjust_link as the link-change callback, and mask
 * the PHY's supported/advertised feature set down to what the MAC
 * interface mode allows.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                /* Unsupported interface mode: undo the connect. */
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}
1613
1614 static void tg3_phy_start(struct tg3 *tp)
1615 {
1616         struct phy_device *phydev;
1617
1618         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1619                 return;
1620
1621         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1622
1623         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1624                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1625                 phydev->speed = tp->link_config.orig_speed;
1626                 phydev->duplex = tp->link_config.orig_duplex;
1627                 phydev->autoneg = tp->link_config.orig_autoneg;
1628                 phydev->advertising = tp->link_config.orig_advertising;
1629         }
1630
1631         phy_start(phydev);
1632
1633         phy_start_aneg(phydev);
1634 }
1635
1636 static void tg3_phy_stop(struct tg3 *tp)
1637 {
1638         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1639                 return;
1640
1641         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1642 }
1643
1644 static void tg3_phy_fini(struct tg3 *tp)
1645 {
1646         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1647                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1648                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1649         }
1650 }
1651
/* Enable/disable auto power-down on FET-style PHYs by toggling the
 * APD bit in shadow register AUXSTAT2, reached through the FET_TEST
 * shadow-enable window.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                u32 phy;

                /* Open the shadow register window. */
                tg3_writephy(tp, MII_TG3_FET_TEST,
                             phytest | MII_TG3_FET_SHADOW_EN);
                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
                        if (enable)
                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        else
                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
                }
                /* Restore TEST register, closing the shadow window. */
                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
}
1671
/* Enable or disable the PHY's Auto Power Down feature via the misc
 * shadow register.  Not applicable before the 5705 generation, and
 * skipped on 5717+ devices using an MII serdes.  FET-style PHYs have
 * a different shadow layout and are handled by
 * tg3_phy_fet_toggle_apd().
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        /* Program scratch register 5.  The DLL APD bit is left clear
         * on 5784 when enabling - presumably a chip-specific
         * workaround; confirm against Broadcom documentation.
         */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

        /* Program the APD wake timer (84ms) and, when requested, the
         * actual APD enable bit.
         */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1706
/* Turn automatic MDI/MDI-X crossover on or off.  Only meaningful on
 * 5705+ copper PHYs; all serdes interfaces are skipped.  FET PHYs
 * keep the control bit in a shadow register; other PHYs use the
 * auxctl misc shadow.  Any register access failure aborts quietly.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        /* Open the shadow bank, update MISCCTRL, then
                         * restore the original test-register value.
                         */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}
1747
1748 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1749 {
1750         int ret;
1751         u32 val;
1752
1753         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1754                 return;
1755
1756         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1757         if (!ret)
1758                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1759                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1760 }
1761
/* Copy factory calibration values from the cached OTP (one-time
 * programmable memory) word into the PHY DSP coefficient registers.
 * Each field is extracted from tp->phy_otp and written through the
 * DSP r/w port while the SMDSP clock is enabled.  No-op when no OTP
 * data was captured at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Bail out if the SMDSP clock cannot be enabled. */
        if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
                return;

        /* AGC target */
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        /* High-pass filter tuning / override */
        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        /* Low-pass filter disable plus ADC clock adjust */
        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        /* VDAC trim */
        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        /* 10BASE-T amplitude */
        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        /* Resistor offsets */
        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
1798
/* Per-link-change EEE (Energy Efficient Ethernet) bookkeeping.
 *
 * When a full-duplex 100/1000 link has been autonegotiated, program
 * the LPI exit timer for the negotiated speed and check the clause-45
 * EEE resolution status.  tp->setlpicnt is armed (to 2) only when the
 * link partner actually resolved EEE at that speed - presumably the
 * timer path enables LPI once it counts down; confirm against the
 * timer code.  In every other case LPI is disabled immediately so a
 * non-EEE link partner never sees us enter low-power idle.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up == 1 &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                /* LPI exit latency depends on link speed. */
                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);

                /* Arm LPI only if EEE resolved with the partner at
                 * the negotiated speed.
                 */
                if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
                    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}
1835
/* Turn on LPI (low power idle) in the CPMU.  On 5717/5719/57765
 * running at gigabit speed, a DSP workaround value (TAP26 = 0x0003)
 * is written first, with the SMDSP clock enabled around the write.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
1852
1853 static int tg3_wait_macro_done(struct tg3 *tp)
1854 {
1855         int limit = 100;
1856
1857         while (limit--) {
1858                 u32 tmp32;
1859
1860                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1861                         if ((tmp32 & 0x1000) == 0)
1862                                 break;
1863                 }
1864         }
1865         if (limit < 0)
1866                 return -EBUSY;
1867
1868         return 0;
1869 }
1870
/* Write a known test pattern into each of the four DFE TAP channels
 * and read it back to verify the PHY DSP is functioning.  On a macro
 * timeout, *resetp is set so the caller retries after another PHY
 * reset.  A readback mismatch triggers a recovery write sequence
 * (register 0x000b) before failing.  Returns 0 when all channels
 * verify cleanly, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select the channel and open the macro for writing. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Latch the pattern and wait for the macro to finish. */
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel for readback. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Each pattern pair reads back as a low and high word. */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: issue the recovery writes
                                 * before failing.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1936
1937 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1938 {
1939         int chan;
1940
1941         for (chan = 0; chan < 4; chan++) {
1942                 int i;
1943
1944                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1945                              (chan * 0x2000) | 0x0200);
1946                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1947                 for (i = 0; i < 6; i++)
1948                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1949                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1950                 if (tg3_wait_macro_done(tp))
1951                         return -EBUSY;
1952         }
1953
1954         return 0;
1955 }
1956
/* Extended PHY reset procedure for 5703/5704/5705: reset the PHY,
 * force 1000 Mbps full-duplex master mode, block PHY control access,
 * and verify the DSP with a test pattern, retrying (with another
 * reset) up to 10 times.  Afterwards the channels are cleared and
 * the transmitter/interrupt and master-mode settings are restored.
 *
 * NOTE(review): if every iteration bails out via "continue" (readphy
 * failures), phy9_orig is written back uninitialized below -
 * longstanding upstream behavior, but worth confirming.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
                if (err)
                        return err;

                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear the channels regardless of how the loop ended. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access. */
        tg3_phydsp_write(tp, 0x8005, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

        /* Restore the saved master-mode register. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt. */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
2024
/* Reset the tigon3 PHY and reapply all chip- and PHY-specific
 * workarounds that a hardware reset wipes out.  Reports link loss to
 * the stack if the carrier was up.  Returns 0 on success or a
 * negative errno if the PHY does not respond.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Pull the embedded PHY out of IDDQ power-down first. */
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR status bits are latched; read twice to get current
         * state and to confirm the PHY is responding at all.
         */
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* These chips need the extended reset-and-verify
                 * procedure; the shared workarounds at "out" still
                 * apply.
                 */
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* 5784 (non-AX): temporarily lift the 10Mb RX-only clock
         * restriction so the reset can complete, restoring it after.
         */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                /* Undo the 12.5MHz MAC clock setting applied in the
                 * power-down path (see tg3_power_down_phy()).
                 */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* Per-PHY erratum workarounds, applied via DSP writes. */
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                /* Written twice on purpose - longstanding upstream
                 * sequence for this erratum.
                 */
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2165
/* Configure the GPIO-driven auxiliary power (Vaux) switch-over.
 * Vaux must stay up when this port - or, on dual-port parts, its
 * peer - needs power for WoL or ASF.  The GPIO toggle sequences are
 * chip-specific; the order of the tw32_wait_f() writes matters, so
 * do not reorder them.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        bool need_vaux = false;

        /* The GPIOs do something completely different on 57765. */
        if (!tg3_flag(tp, IS_NIC) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                return;

        /* On dual-port chips, consult the other port's WoL/ASF state. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
            tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);

                /* remove_one() may have been run on the peer. */
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);

                        /* If the peer is fully up it owns the GPIOs. */
                        if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;

                        if (tg3_flag(tp_peer, WOL_ENABLE) ||
                            tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }

        if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;

        if (need_vaux) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Vaux not needed: release GPIO1 in a glitch-free
                 * three-step sequence (5700/5701 excluded).
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
2282
2283 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2284 {
2285         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2286                 return 1;
2287         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2288                 if (speed != SPEED_10)
2289                         return 1;
2290         } else if (speed == SPEED_10)
2291                 return 1;
2292
2293         return 0;
2294 }
2295
/* Forward declarations for the power-management paths below. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset-kind codes; presumably passed as the second argument to
 * tg3_write_sig_post_reset() to tag why the chip was reset - see
 * the callers for the exact contract.
 */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
2304
/* Put the PHY into its lowest safe power state.  The path taken
 * depends on the PHY type: serdes, 5906 embedded PHY, FET-style
 * PHYs, and regular copper PHYs each get their own sequence.  Chips
 * with power-down errata are left powered (no BMCR_PDOWN write).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        /* Hand the link over to hardware autoneg and
                         * hold the serdes in soft reset.
                         */
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Reset the embedded PHY, then put it into IDDQ. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        /* Stop advertising, then enable standby power
                         * down through the shadow register bank.
                         */
                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                /* Force LEDs off and isolate the PHY in low-power
                 * regulator mode.
                 */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        /* Drop the 1000Mb MAC clock to 12.5MHz while powered down;
         * undone in tg3_phy_reset().
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2377
2378 /* tp->lock is held. */
2379 static int tg3_nvram_lock(struct tg3 *tp)
2380 {
2381         if (tg3_flag(tp, NVRAM)) {
2382                 int i;
2383
2384                 if (tp->nvram_lock_cnt == 0) {
2385                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2386                         for (i = 0; i < 8000; i++) {
2387                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2388                                         break;
2389                                 udelay(20);
2390                         }
2391                         if (i == 8000) {
2392                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2393                                 return -ENODEV;
2394                         }
2395                 }
2396                 tp->nvram_lock_cnt++;
2397         }
2398         return 0;
2399 }
2400
2401 /* tp->lock is held. */
2402 static void tg3_nvram_unlock(struct tg3 *tp)
2403 {
2404         if (tg3_flag(tp, NVRAM)) {
2405                 if (tp->nvram_lock_cnt > 0)
2406                         tp->nvram_lock_cnt--;
2407                 if (tp->nvram_lock_cnt == 0)
2408                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2409         }
2410 }
2411
2412 /* tp->lock is held. */
2413 static void tg3_enable_nvram_access(struct tg3 *tp)
2414 {
2415         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2416                 u32 nvaccess = tr32(NVRAM_ACCESS);
2417
2418                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2419         }
2420 }
2421
2422 /* tp->lock is held. */
2423 static void tg3_disable_nvram_access(struct tg3 *tp)
2424 {
2425         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2426                 u32 nvaccess = tr32(NVRAM_ACCESS);
2427
2428                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2429         }
2430 }
2431
/* Read one 32-bit word from the legacy serial EEPROM through the GRC
 * EEPROM address/data registers.  @offset must be 4-byte aligned and
 * within EEPROM_ADDR_ADDR_MASK.  Returns 0 on success, -EINVAL for a
 * bad offset, or -EBUSY if the read never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated register bits; clear address/devid/read. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Kick off a read of the requested word (device id 0). */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, sleeping 1ms per attempt (up to ~1s). */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2471
2472 #define NVRAM_CMD_TIMEOUT 10000
2473
2474 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2475 {
2476         int i;
2477
2478         tw32(NVRAM_CMD, nvram_cmd);
2479         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2480                 udelay(10);
2481                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2482                         udelay(10);
2483                         break;
2484                 }
2485         }
2486
2487         if (i == NVRAM_CMD_TIMEOUT)
2488                 return -EBUSY;
2489
2490         return 0;
2491 }
2492
2493 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2494 {
2495         if (tg3_flag(tp, NVRAM) &&
2496             tg3_flag(tp, NVRAM_BUFFERED) &&
2497             tg3_flag(tp, FLASH) &&
2498             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2499             (tp->nvram_jedecnum == JEDEC_ATMEL))
2500
2501                 addr = ((addr / tp->nvram_pagesize) <<
2502                         ATMEL_AT45DB0X1B_PAGE_POS) +
2503                        (addr % tp->nvram_pagesize);
2504
2505         return addr;
2506 }
2507
2508 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2509 {
2510         if (tg3_flag(tp, NVRAM) &&
2511             tg3_flag(tp, NVRAM_BUFFERED) &&
2512             tg3_flag(tp, FLASH) &&
2513             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2514             (tp->nvram_jedecnum == JEDEC_ATMEL))
2515
2516                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2517                         tp->nvram_pagesize) +
2518                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2519
2520         return addr;
2521 }
2522
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Returns 0 on success or a negative errno from the lock,
 * validation, or command-execution steps.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without the NVRAM interface fall back to the EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Map the logical offset onto the flash's physical page layout. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Serialize against firmware use of the NVRAM interface. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2560
2561 /* Ensures NVRAM data is in bytestream format. */
2562 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2563 {
2564         u32 v;
2565         int res = tg3_nvram_read(tp, offset, &v);
2566         if (!res)
2567                 *val = cpu_to_be32(v);
2568         return res;
2569 }
2570
/* tp->lock is held.
 *
 * Program the station MAC address into the MAC address register slots
 * and seed the TX backoff generator from it.  When @skip_mac_1 is set,
 * register slot 1 is left untouched.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Upper 2 address bytes go in the _HIGH register, lower 4 in _LOW. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	/* Fill all four primary address slots with the same address. */
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 have 12 extended address slots that are filled too. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff register with the byte-sum of the address,
	 * presumably to decorrelate backoff timing between NICs.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
2607
/* Rewrite the cached MISC_HOST_CTRL value into PCI config space so that
 * subsequent register accesses work; needed after power transitions.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2617
/* Bring the device back to full power (D0) and re-enable register
 * access.  Always returns 0.
 */
static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2630
/* Quiesce the chip ahead of a low-power transition: save link settings,
 * reprogram the PHY for the wake configuration, arm Wake-on-LAN state
 * in SRAM, and gear down the core clocks.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is reconfigured. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the active link settings so power-up can
			 * restore them.
			 */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only slow modes while suspended. */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families (matched by OUI)
			 * take the legacy low-power path below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			/* Save current settings for restoration on wake. */
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper PHY: renegotiate down to 10/half. */
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox magic value. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				/* Enable PHY WOL power-control via the
				 * auxiliary control shadow register.
				 */
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver alive so wake frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		/* Gear the core clocks down in two steps; the bit
		 * combinations differ per chip generation.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power the PHY down only if neither wake-up nor ASF needs it. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Hold the NVRAM lock across the RX CPU halt so
			 * firmware is not racing us.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2876
/* Complete power-down: run the prepare sequence, then arm PCI wake
 * (when WOL is enabled) and place the device in D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
2884
2885 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2886 {
2887         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2888         case MII_TG3_AUX_STAT_10HALF:
2889                 *speed = SPEED_10;
2890                 *duplex = DUPLEX_HALF;
2891                 break;
2892
2893         case MII_TG3_AUX_STAT_10FULL:
2894                 *speed = SPEED_10;
2895                 *duplex = DUPLEX_FULL;
2896                 break;
2897
2898         case MII_TG3_AUX_STAT_100HALF:
2899                 *speed = SPEED_100;
2900                 *duplex = DUPLEX_HALF;
2901                 break;
2902
2903         case MII_TG3_AUX_STAT_100FULL:
2904                 *speed = SPEED_100;
2905                 *duplex = DUPLEX_FULL;
2906                 break;
2907
2908         case MII_TG3_AUX_STAT_1000HALF:
2909                 *speed = SPEED_1000;
2910                 *duplex = DUPLEX_HALF;
2911                 break;
2912
2913         case MII_TG3_AUX_STAT_1000FULL:
2914                 *speed = SPEED_1000;
2915                 *duplex = DUPLEX_FULL;
2916                 break;
2917
2918         default:
2919                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2920                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2921                                  SPEED_10;
2922                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2923                                   DUPLEX_HALF;
2924                         break;
2925                 }
2926                 *speed = SPEED_INVALID;
2927                 *duplex = DUPLEX_INVALID;
2928                 break;
2929         }
2930 }
2931
/* Begin link bring-up on a copper PHY: program the advertisement
 * registers (or force a fixed speed/duplex), set up EEE advertisement
 * and DSP workarounds where applicable, then (re)start autonegotiation
 * or force the BMCR mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Autoneg without a forced speed: advertise everything
		 * the configured mask allows.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 force master mode when advertising
			 * gigabit -- presumably a silicon workaround.
			 */
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement for forced 10/100. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		u32 val;

		/* Keep LPI requests disabled while (re)negotiating. */
		tw32(TG3_CPMU_EEE_MODE,
		     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

		TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);

		/* Per-ASIC DSP tuning applied before EEE advertisement. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Advertise 100-BaseTX EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_100baseT_Full)
				val |= MDIO_AN_EEE_ADV_100TX;
			/* Advertise 1000-BaseT EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_1000baseT_Full)
				val |= MDIO_AN_EEE_ADV_1000T;
		}
		tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback, wait for link status
			 * to clear, then program the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; the link bit is
				 * latched, so the second read reflects the
				 * current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3107
/* Apply the 5401 PHY DSP initialization sequence.  Returns non-zero if
 * any of the register writes failed.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Vendor-specific DSP register values; meanings are not publicly
	 * documented (presumably supplied by Broadcom).
	 */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
3126
3127 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3128 {
3129         u32 adv_reg, all_mask = 0;
3130
3131         if (mask & ADVERTISED_10baseT_Half)
3132                 all_mask |= ADVERTISE_10HALF;
3133         if (mask & ADVERTISED_10baseT_Full)
3134                 all_mask |= ADVERTISE_10FULL;
3135         if (mask & ADVERTISED_100baseT_Half)
3136                 all_mask |= ADVERTISE_100HALF;
3137         if (mask & ADVERTISED_100baseT_Full)
3138                 all_mask |= ADVERTISE_100FULL;
3139
3140         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3141                 return 0;
3142
3143         if ((adv_reg & all_mask) != all_mask)
3144                 return 0;
3145         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3146                 u32 tg3_ctrl;
3147
3148                 all_mask = 0;
3149                 if (mask & ADVERTISED_1000baseT_Half)
3150                         all_mask |= ADVERTISE_1000HALF;
3151                 if (mask & ADVERTISED_1000baseT_Full)
3152                         all_mask |= ADVERTISE_1000FULL;
3153
3154                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3155                         return 0;
3156
3157                 if ((tg3_ctrl & all_mask) != all_mask)
3158                         return 0;
3159         }
3160         return 1;
3161 }
3162
/* Verify that the pause bits in the local advertisement register match
 * what the configured flow control requires.  Fills *lcladv (and, for
 * full duplex with autoneg pause, *rmtadv).  Returns 1 when the
 * advertisement is acceptable, 0 when the link must be renegotiated.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;	/* read failed; treat as acceptable */

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* On an active full-duplex link a mismatch forces a
		 * renegotiation.
		 */
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
3195
/* Bring the copper PHY link up (or tear it down) and synchronize the
 * MAC configuration with the negotiated link parameters.
 *
 * @tp: driver private state
 * @force_reset: non-zero forces a PHY reset before link setup
 *
 * Returns 0 on success or a negative errno propagated from the PHY
 * DSP init helpers.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Acknowledge any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we drive the MDIO bus directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low: read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: reload the 5401 DSP coefficients
			 * and wait (up to ~10ms) for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: if link still did not
			 * come back, reset and reprogram the DSP once more.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt, or mask everything
	 * when interrupts are not used (non-FET PHYs).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* If MISCTEST bit 10 is clear, set it and skip straight to
		 * renegotiation.  NOTE(review): exact meaning of bit 10 is
		 * per the Broadcom PHY datasheet — not visible here.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link, reading BMSR twice per pass
	 * because the link bit is latched-low.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the aux status register to report something. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until a plausible (non-0, non-0x7fff)
		 * value comes back.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg mode: the link is only good if the PHY is
			 * autonegotiating, advertising everything we want,
			 * and flow control advertisement is consistent.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is good only if the forced
			 * settings match what the PHY actually resolved.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		/* (Re)start autonegotiation, then re-check link; internal
		 * MAC loopback always counts as link up.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII for gigabit). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: re-ack status and poke the
	 * firmware mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* CLKREQ must be off at 10/100, on otherwise; only write
		 * the PCIe Link Control register when it actually changes.
		 */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Report carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3474
/* Software 1000BASE-X autonegotiation state, driven by
 * tg3_fiber_aneg_smachine() on behalf of fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control/status bits, see below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
/* MR_LP_ADV_*: abilities advertised by the link partner, decoded from
 * the received config word.
 */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters: cur_time advances once per state-machine call,
	 * link_time records when the current state was entered.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last stable received config word */
	int ability_match_count;	/* consecutive identical config words */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* config words sent / received */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME	10000
3538
/* Run one tick of the software 1000BASE-X autonegotiation state
 * machine (Clause-37 style config-word exchange done in software).
 *
 * @tp: driver private state
 * @ap: persistent autoneg state, advanced in place
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when the caller
 * should keep the tick timer running, ANEG_DONE on completion, or
 * ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the match trackers:
	 * ability_match is set once the same non-changing word has been
	 * seen on consecutive ticks; ack_match mirrors the ACK bit.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: link partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restarting negotiation: clear all trackers. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to signal restart. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start transmitting our abilities: full duplex plus the
		 * pause bits derived from the configured flow control.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero config word from the peer. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the peer's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Peer acked: proceed if its config word (ignoring
			 * ACK) is still the one we matched, else restart.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Peer went back to a restart word. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject config words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the peer's advertised abilities into MR_LP_ADV_*. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only tolerate it when neither side
				 * actually has a next page to send.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3790
3791 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3792 {
3793         int res = 0;
3794         struct tg3_fiber_aneginfo aninfo;
3795         int status = ANEG_FAILED;
3796         unsigned int tick;
3797         u32 tmp;
3798
3799         tw32_f(MAC_TX_AUTO_NEG, 0);
3800
3801         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3802         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3803         udelay(40);
3804
3805         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3806         udelay(40);
3807
3808         memset(&aninfo, 0, sizeof(aninfo));
3809         aninfo.flags |= MR_AN_ENABLE;
3810         aninfo.state = ANEG_STATE_UNKNOWN;
3811         aninfo.cur_time = 0;
3812         tick = 0;
3813         while (++tick < 195000) {
3814                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3815                 if (status == ANEG_DONE || status == ANEG_FAILED)
3816                         break;
3817
3818                 udelay(1);
3819         }
3820
3821         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3822         tw32_f(MAC_MODE, tp->mac_mode);
3823         udelay(40);
3824
3825         *txflags = aninfo.txconfig;
3826         *rxflags = aninfo.flags;
3827
3828         if (status == ANEG_DONE &&
3829             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3830                              MR_LP_ADV_FULL_DUPLEX)))
3831                 res = 1;
3832
3833         return res;
3834 }
3835
/* Initialize the BCM8002 SerDes PHY via its vendor-specific register
 * sequence.  Skipped when the device is already initialized and in
 * sync (no reconfiguration needed while the link is good).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	/* Toggle register 0x11 to latch the new settings.
	 * NOTE(review): exact semantics per BCM8002 datasheet.
	 */
	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3885
3886 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3887 {
3888         u16 flowctrl;
3889         u32 sg_dig_ctrl, sg_dig_status;
3890         u32 serdes_cfg, expected_sg_dig_ctrl;
3891         int workaround, port_a;
3892         int current_link_up;
3893
3894         serdes_cfg = 0;
3895         expected_sg_dig_ctrl = 0;
3896         workaround = 0;
3897         port_a = 1;
3898         current_link_up = 0;
3899
3900         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3901             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3902                 workaround = 1;
3903                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3904                         port_a = 0;
3905
3906                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3907                 /* preserve bits 20-23 for voltage regulator */
3908                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3909         }
3910
3911         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3912
3913         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3914                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3915                         if (workaround) {
3916                                 u32 val = serdes_cfg;
3917
3918                                 if (port_a)
3919                                         val |= 0xc010000;
3920                                 else
3921                                         val |= 0x4010000;
3922                                 tw32_f(MAC_SERDES_CFG, val);
3923                         }
3924
3925                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3926                 }
3927                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3928                         tg3_setup_flow_control(tp, 0, 0);
3929                         current_link_up = 1;
3930                 }
3931                 goto out;
3932         }
3933
3934         /* Want auto-negotiation.  */
3935         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3936
3937         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3938         if (flowctrl & ADVERTISE_1000XPAUSE)
3939                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3940         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3941                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3942
3943         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3944                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3945                     tp->serdes_counter &&
3946                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3947                                     MAC_STATUS_RCVD_CFG)) ==
3948                      MAC_STATUS_PCS_SYNCED)) {
3949                         tp->serdes_counter--;
3950                         current_link_up = 1;
3951                         goto out;
3952                 }
3953 restart_autoneg:
3954                 if (workaround)
3955                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3956                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3957                 udelay(5);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3959
3960                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3961                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3962         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3963                                  MAC_STATUS_SIGNAL_DET)) {
3964                 sg_dig_status = tr32(SG_DIG_STATUS);
3965                 mac_status = tr32(MAC_STATUS);
3966
3967                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3968                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3969                         u32 local_adv = 0, remote_adv = 0;
3970
3971                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3972                                 local_adv |= ADVERTISE_1000XPAUSE;
3973                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3974                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3975
3976                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3977                                 remote_adv |= LPA_1000XPAUSE;
3978                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3979                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3980
3981                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3982                         current_link_up = 1;
3983                         tp->serdes_counter = 0;
3984                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3985                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3986                         if (tp->serdes_counter)
3987                                 tp->serdes_counter--;
3988                         else {
3989                                 if (workaround) {
3990                                         u32 val = serdes_cfg;
3991
3992                                         if (port_a)
3993                                                 val |= 0xc010000;
3994                                         else
3995                                                 val |= 0x4010000;
3996
3997                                         tw32_f(MAC_SERDES_CFG, val);
3998                                 }
3999
4000                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4001                                 udelay(40);
4002
4003                                 /* Link parallel detection - link is up */
4004                                 /* only if we have PCS_SYNC and not */
4005                                 /* receiving config code words */
4006                                 mac_status = tr32(MAC_STATUS);
4007                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4008                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4009                                         tg3_setup_flow_control(tp, 0, 0);
4010                                         current_link_up = 1;
4011                                         tp->phy_flags |=
4012                                                 TG3_PHYFLG_PARALLEL_DETECT;
4013                                         tp->serdes_counter =
4014                                                 SERDES_PARALLEL_DET_TIMEOUT;
4015                                 } else
4016                                         goto restart_autoneg;
4017                         }
4018                 }
4019         } else {
4020                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4021                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4022         }
4023
4024 out:
4025         return current_link_up;
4026 }
4027
/* Bring up a fiber link without the MAC's hardware autoneg engine.
 *
 * Nothing is attempted unless PCS sync is present in @mac_status.  With
 * autoneg enabled, the software autoneg state machine (fiber_autoneg())
 * is run and the resulting tx/rx config words are translated into
 * 1000BASE-X pause advertisement bits for tg3_setup_flow_control().
 * With autoneg disabled, flow control is turned off and a 1000FD link
 * is forced.
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable link; bail out immediately. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the negotiated pause config bits onto the
			 * standard 1000BASE-X advertisement/LPA encoding
			 * expected by tg3_setup_flow_control().
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Acknowledge SYNC/CFG changed events until they stay
		 * clear, bounded to 30 attempts so we cannot spin forever.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete, but we still have PCS sync
		 * and are not receiving config code words: declare the
		 * link up via parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS, then restore the normal MAC mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4089
/* Configure and (re)establish the link on a fiber (TBI) interface.
 *
 * Snapshots the current speed/duplex/flowctrl so a link report is only
 * emitted when something actually changed.  Chooses between the MAC's
 * hardware autoneg engine and the software fallback, then acknowledges
 * outstanding link-state events and updates carrier and LED state.
 *
 * Always returns 0 (@force_reset is accepted for signature parity with
 * the other tg3_setup_*_phy() variants but is not used here).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current link parameters to detect changes at the end. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: without HW autoneg, if the carrier is already up and
	 * init is complete, a clean status (PCS sync + signal detect, no
	 * pending config change) means there is nothing to redo -- just
	 * ack the changed bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated but clear the stale link-change
	 * indication so the next interrupt reflects fresh state.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG changed events until all change bits stay clear
	 * (bounded to 100 attempts).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg wanted and its timer expired: pulse SEND_CONFIGS
		 * to nudge the link partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Record link parameters and drive the LED override accordingly;
	 * fiber links here are always reported as 1000/full when up.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report the link only when carrier state or link parameters
	 * actually changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4197
/* Configure the link on a serdes interface reached through the MII
 * register set (TG3_PHYFLG_MII_SERDES devices, e.g. the 5714 class).
 *
 * Handles three cases: link already up via parallel detect (leave the
 * PHY alone and just re-check link status), autoneg (program the
 * 1000BASE-X advertisement and restart autoneg if needed), and forced
 * mode (rewrite BMCR, forcing a linkdown first if the carrier is up).
 * Finally resolves speed/duplex/flow-control and syncs carrier state.
 *
 * Returns 0 on success or an accumulated nonzero value if any PHY
 * register access failed.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any pending MAC status events before reconfiguring. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down; read twice for the current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * requested link and flow-control configuration.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* If the advertisement changed or autoneg is off, restart
		 * autoneg and return early; the AN timer will finish the
		 * job on a later poll.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the target BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the ability bits and restart
				 * autoneg so the partner drops the link
				 * before we force our settings.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read: BMSR link status is latched-low. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common abilities of
			 * both link partners.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Sync the carrier state and report if it changed. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4369
/* Periodic poll that implements parallel detection for MII serdes.
 *
 * While tp->serdes_counter is nonzero, autoneg is still being given
 * time to complete and nothing is done.  Afterwards: with no carrier
 * and autoneg enabled, check the PHY's signal-detect and config-word
 * indications; if we have signal but no config words, the partner is
 * not autonegotiating, so force 1000FD and mark parallel detect.
 * Conversely, if the link came up via parallel detect and config words
 * start arriving, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; the first read returns stale
			 * latched state.  NOTE(review): inferred from
			 * the deliberate double read -- confirm against
			 * the PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4429
/* Top-level PHY setup: dispatch to the appropriate PHY-type handler,
 * then apply link-state dependent register fixups (clock prescaler on
 * 5784_AX, MAC TX lengths/slot time, statistics coalescing, and the
 * ASPM power-management threshold workaround).
 *
 * Returns the error code of the selected tg3_setup_*_phy() handler.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	/* Pick the handler matching the PHY attachment type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Derive the GRC timer prescaler from the current MAC
		 * core clock speed.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit uses an extended slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 chips: only coalesce statistics while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the L1 entry threshold while the link
	 * is down, saturate it while the link is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4494
4495 static inline int tg3_irq_sync(struct tg3 *tp)
4496 {
4497         return tp->irq_sync;
4498 }
4499
4500 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4501 {
4502         int i;
4503
4504         dst = (u32 *)((u8 *)dst + off);
4505         for (i = 0; i < len; i += sizeof(u32))
4506                 *dst++ = tr32(off + i);
4507 }
4508
/* Fill @regs with a dump of the legacy (non-PCIe-private) register
 * blocks.  Each tg3_rd32_loop() call reads one hardware block at its
 * natural offset; the length arguments are the per-block byte sizes.
 * Blocks that only exist on some chips (MSI-X vectors, TX CPU, NVRAM)
 * are gated on the corresponding feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The dedicated TX CPU was dropped on 5705 and later chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4558
/* Dump device register state and per-vector status/NAPI bookkeeping to
 * the kernel log, for post-mortem debugging.  Uses GFP_ATOMIC because
 * this may be called from non-sleepable (e.g. timer/error) context --
 * NOTE(review): inferred from the allocation flag; confirm callers.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep
	 * the log compact.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump the software view of each interrupt vector's state. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4616
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround or the indirect mailbox method is
	 * already in effect, this symptom should be impossible -- treat
	 * it as a driver bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later in the workqueue (see comment above).
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4638
4639 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4640 {
4641         /* Tell compiler to fetch tx indices from memory. */
4642         barrier();
4643         return tnapi->tx_pending -
4644                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4645 }
4646
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's tx consumer index, read from the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS the first vector carries no tx ring, so the tx queue
	 * numbering is shifted down by one.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reclaim every descriptor the hardware has completed. */
	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a "completed" slot means the completion
		 * index is bogus — most likely MMIO write reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment, sanity-checking ring consistency
		 * as we walk the per-fragment slots.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx queue lock to close the race against the
	 * xmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4721
4722 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4723 {
4724         if (!ri->skb)
4725                 return;
4726
4727         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4728                          map_sz, PCI_DMA_FROMDEVICE);
4729         dev_kfree_skb_any(ri->skb);
4730         ri->skb = NULL;
4731 }
4732
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, bookkeeping slot and map size for the
	 * ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	/* rx_offset shifts the payload start (e.g. for IP alignment). */
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit point: record the skb and DMA address, then hand the
	 * bus address to the chip.  All other descriptor fields are
	 * invariant (see comment above).
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4799
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source buffers always live on the vector-0 producer ring. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring cookie — nothing to recycle. */
		return;
	}

	/* Move the skb bookkeeping and DMA address to the new slot. */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
4849
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the producer ring and the
		 * slot within it this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Errored frames are dropped and their buffer recycled
		 * back onto the producer ring untouched.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: pass the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer back to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* A pseudo-checksum of 0xffff from the chip indicates a
		 * verified TCP/UDP checksum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (which accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the standard producer index so the
		 * chip does not run out of buffers during a long burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Under RSS, napi[1] transfers the refilled buffers to the
		 * hardware producer rings (see tg3_poll_work).
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5056
/* Check the status block for a link-change event and handle it under
 * tp->lock.  Skipped entirely when link state is tracked by register
 * polling instead (USE_LINKCHG_REG / POLL_SERDES).
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep the block
			 * marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY; just ack the MAC
				 * status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5080
/* Move refilled rx buffers from the source producer ring set (spr) to
 * the destination set (dpr), for both the standard and the jumbo
 * rings.  Returns 0 on success, or -ENOSPC if a destination slot was
 * still occupied (remaining entries are left for a later pass).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First pass: the standard-sized buffer ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous source entries, accounting for
		 * ring wrap-around.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bound by contiguous space in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Trim the copy if a destination slot is still in use. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the DMA addresses into the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Second pass: the jumbo buffer ring — same algorithm. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5206
/* Per-vector poll work: reclaim completed tx descriptors, receive
 * packets within the remaining NAPI budget, and (for napi[1] under
 * RSS) publish the refilled producer rings to the chip.  Returns the
 * updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() may have flagged a pending chip reset; bail
		 * out early in that case.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Gather refilled buffers from all rx vectors into the
		 * hardware-visible rings on vector 0.
		 */
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the buffer updates visible before telling the chip. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (-ENOSPC); poke HOSTCC_MODE with
		 * coal_now so the xfer is retried on a later poll.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5253
/* NAPI poll handler for the MSI-X rx/tx vectors (napi[1..]), which
 * use tagged status blocks for interrupt acknowledgement.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	/* The reset task performs the actual chip reset. */
	schedule_work(&tp->reset_task);
	return work_done;
}
5297
5298 static void tg3_process_error(struct tg3 *tp)
5299 {
5300         u32 val;
5301         bool real_error = false;
5302
5303         if (tg3_flag(tp, ERROR_PROCESSED))
5304                 return;
5305
5306         /* Check Flow Attention register */
5307         val = tr32(HOSTCC_FLOW_ATTN);
5308         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5309                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5310                 real_error = true;
5311         }
5312
5313         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5314                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5315                 real_error = true;
5316         }
5317
5318         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5319                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5320                 real_error = true;
5321         }
5322
5323         if (!real_error)
5324                 return;
5325
5326         tg3_dump_state(tp);
5327
5328         tg3_flag_set(tp, ERROR_PROCESSED);
5329         schedule_work(&tp->reset_task);
5330 }
5331
/* NAPI poll handler for the first vector.  Besides rx/tx work it also
 * checks for status-block error indications and link-change events.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	/* Defer the chip reset to process context. */
	schedule_work(&tp->reset_task);
	return work_done;
}
5379
5380 static void tg3_napi_disable(struct tg3 *tp)
5381 {
5382         int i;
5383
5384         for (i = tp->irq_cnt - 1; i >= 0; i--)
5385                 napi_disable(&tp->napi[i].napi);
5386 }
5387
5388 static void tg3_napi_enable(struct tg3 *tp)
5389 {
5390         int i;
5391
5392         for (i = 0; i < tp->irq_cnt; i++)
5393                 napi_enable(&tp->napi[i].napi);
5394 }
5395
5396 static void tg3_napi_init(struct tg3 *tp)
5397 {
5398         int i;
5399
5400         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5401         for (i = 1; i < tp->irq_cnt; i++)
5402                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5403 }
5404
5405 static void tg3_napi_fini(struct tg3 *tp)
5406 {
5407         int i;
5408
5409         for (i = 0; i < tp->irq_cnt; i++)
5410                 netif_napi_del(&tp->napi[i].napi);
5411 }
5412
/* Quiesce the data path: freeze NAPI polling and stop all tx queues.
 * Refreshing trans_start first keeps the tx watchdog from firing
 * while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5419
/* Restart the data path: wake tx queues, re-enable NAPI and chip
 * interrupts.  Counterpart of tg3_netif_stop().
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so pending work is noticed;
	 * NOTE(review): presumably tg3_has_work() keys off this bit —
	 * confirm against its definition elsewhere in the file.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5432
/* Set tp->irq_sync and wait for any interrupt handlers already
 * running on any vector to finish.  Called from tg3_full_lock() when
 * full IRQ synchronization is requested.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nested quiescing is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5445
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);    /* drain in-flight IRQ handlers */
}
5457
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
5462
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the caches for the data the poll loop touches first. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Skip NAPI scheduling while tg3_irq_quiesce() is draining us. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
5480
5481 /* MSI ISR - No need to check for interrupt sharing and no need to
5482  * flush status block and interrupt mailbox. PCI ordering rules
5483  * guarantee that MSI will arrive after the status block.
5484  */
5485 static irqreturn_t tg3_msi(int irq, void *dev_id)
5486 {
5487         struct tg3_napi *tnapi = dev_id;
5488         struct tg3 *tp = tnapi->tp;
5489
5490         prefetch(tnapi->hw_status);
5491         if (tnapi->rx_rcb)
5492                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5493         /*
5494          * Writing any value to intr-mbox-0 clears PCI INTA# and
5495          * chip-internal interrupt pending events.
5496          * Writing non-zero to intr-mbox-0 additional tells the
5497          * NIC to stop sending us irqs, engaging "in-intr-handler"
5498          * event coalescing.
5499          */
5500         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5501         if (likely(!tg3_irq_sync(tp)))
5502                 napi_schedule(&tnapi->napi);
5503
5504         return IRQ_RETVAL(1);
5505 }
5506
/* Legacy INTx interrupt handler; also reused by tg3_poll_controller(). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                /* No posted status update: the IRQ is ours only if the
                 * chip still asserts INTA and we are not mid-reset.
                 */
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
5555
/* INTx handler variant for chips using tagged status blocks: an
 * unchanged status_tag means no new events since the last IRQ.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
5607
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        /* Claim the IRQ if the status block was updated or the chip
         * still asserts INTA, then disable further interrupts so the
         * test observes exactly one delivery.
         */
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
5622
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On failure the device is halted and
 * closed; dev_close() can sleep, so tp->lock is dropped around the
 * cleanup and re-taken before returning (hence the annotations).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                netdev_err(tp->dev,
                           "Failed to re-initialize device, aborting\n");
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
5649
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: manually run the INTx handler for every vector so
 * netconsole and friends can make progress with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
5660
/* Process-context worker performing a full chip halt/re-init cycle.
 * Scheduled from tg3_tx_timeout() and from the poll loop's tx_recovery
 * path.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* The device may have been closed between scheduling and now. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* PHY and netif teardown run without tp->lock held. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        /* irq_sync=1: no IRQ handler may still be running past here. */
        tg3_full_lock(tp, 1);

        restart_timer = tg3_flag(tp, RESTART_TIMER);
        tg3_flag_clear(tp, RESTART_TIMER);

        if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                /* After a TX-path failure, switch to flushed mailbox
                 * writes -- presumably a write-reordering workaround;
                 * see the tx_recovery path in the poll loop.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tg3_flag_set(tp, MBOX_WRITE_REORDER);
                tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);
}
5708
5709 static void tg3_tx_timeout(struct net_device *dev)
5710 {
5711         struct tg3 *tp = netdev_priv(dev);
5712
5713         if (netif_msg_tx_err(tp)) {
5714                 netdev_err(dev, "transmit timed out, resetting\n");
5715                 tg3_dump_state(tp);
5716         }
5717
5718         schedule_work(&tp->reset_task);
5719 }
5720
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
        u32 base = (u32) mapping & 0xffffffff;

        /* (base + len + 8 < base) detects 32-bit wraparound, i.e. the
         * buffer straddling a 4GB boundary; base > 0xffffdcc0 is a
         * cheap pre-filter (only the last ~9KB below a boundary can
         * wrap).  NOTE(review): the "+ 8" slack and the exact constant
         * look like hardware-workaround values -- confirm against the
         * chip errata before changing either.
         */
        return (base > 0xffffdcc0) && (base + len + 8 < base);
}
5728
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Only meaningful on configs where a mapping can actually
         * exceed 40 bits, and only for chips flagged with the 40-bit
         * DMA bug.
         */
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
#else
        return 0;
#endif
}
5741
5742 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5743                         dma_addr_t mapping, int len, u32 flags,
5744                         u32 mss_and_is_end)
5745 {
5746         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5747         int is_end = (mss_and_is_end & 0x1);
5748         u32 mss = (mss_and_is_end >> 1);
5749         u32 vlan_tag = 0;
5750
5751         if (is_end)
5752                 flags |= TXD_FLAG_END;
5753         if (flags & TXD_FLAG_VLAN) {
5754                 vlan_tag = flags >> 16;
5755                 flags &= 0xffff;
5756         }
5757         vlan_tag |= (mss << TXD_MSS_SHIFT);
5758
5759         txd->addr_hi = ((u64) mapping >> 32);
5760         txd->addr_lo = ((u64) mapping & 0xffffffff);
5761         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5762         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5763 }
5764
/* Undo the DMA mappings made for @skb after a failed transmit setup.
 * @last is the index of the last fragment that was successfully mapped.
 *
 * NOTE(review): this walks forward from tnapi->tx_prod, so it assumes
 * tx_prod still points at the descriptor used for the skb's linear
 * head -- verify callers have not advanced tx_prod before calling.
 */
static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
                                struct sk_buff *skb, int last)
{
        int i;
        u32 entry = tnapi->tx_prod;
        struct ring_info *txb = &tnapi->tx_buffers[entry];

        /* The head of the skb was mapped with pci_map_single(). */
        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
        for (i = 0; i <= last; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               frag->size, PCI_DMA_TODEVICE);
        }
}
5787
5788 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5789 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5790                                        struct sk_buff *skb,
5791                                        u32 base_flags, u32 mss)
5792 {
5793         struct tg3 *tp = tnapi->tp;
5794         struct sk_buff *new_skb;
5795         dma_addr_t new_addr = 0;
5796         u32 entry = tnapi->tx_prod;
5797         int ret = 0;
5798
5799         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5800                 new_skb = skb_copy(skb, GFP_ATOMIC);
5801         else {
5802                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5803
5804                 new_skb = skb_copy_expand(skb,
5805                                           skb_headroom(skb) + more_headroom,
5806                                           skb_tailroom(skb), GFP_ATOMIC);
5807         }
5808
5809         if (!new_skb) {
5810                 ret = -1;
5811         } else {
5812                 /* New SKB is guaranteed to be linear. */
<