tg3: Move TSO_CAPABLE assignment
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Test whether @flag is set in the driver flag bitmap @bits.
 * Returns nonzero if set.  Wrapper so the tg3_flag() macro gets
 * compile-time type checking of the enum TG3_FLAGS argument.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
71
/* Set @flag in the driver flag bitmap @bits (atomic bitop). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
76
/* Clear @flag in the driver flag bitmap @bits (atomic bitop). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
81
82 #define tg3_flag(tp, flag)                              \
83         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag)                          \
85         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag)                        \
87         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     118
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "April 22, 2011"
95
96 #define TG3_DEF_MAC_MODE        0
97 #define TG3_DEF_RX_MODE         0
98 #define TG3_DEF_TX_MODE         0
99 #define TG3_DEF_MSG_ENABLE        \
100         (NETIF_MSG_DRV          | \
101          NETIF_MSG_PROBE        | \
102          NETIF_MSG_LINK         | \
103          NETIF_MSG_TIMER        | \
104          NETIF_MSG_IFDOWN       | \
105          NETIF_MSG_IFUP         | \
106          NETIF_MSG_RX_ERR       | \
107          NETIF_MSG_TX_ERR)
108
109 /* length of time before we decide the hardware is borked,
110  * and dev->tx_timeout() should be called to fix the problem
111  */
112
113 #define TG3_TX_TIMEOUT                  (5 * HZ)
114
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU                     60
117 #define TG3_MAX_MTU(tp) \
118         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
119
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121  * You can't change the ring sizes, but you can change where you place
122  * them in the NIC onboard memory.
123  */
124 #define TG3_RX_STD_RING_SIZE(tp) \
125         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING         200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
132 #define TG3_RSS_INDIR_TBL_SIZE          128
133
134 /* Do not place this n-ring entries value into the tp struct itself,
135  * we really want to expose these constants to GCC so that modulo et
136  * al.  operations are done with shifts and masks instead of with
137  * hw multiply/modulo instructions.  Another solution would be to
138  * replace things like '% foo' with '& (foo - 1)'.
139  */
140
141 #define TG3_TX_RING_SIZE                512
142 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
143
144 #define TG3_RX_STD_RING_BYTES(tp) \
145         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
151                                  TG3_TX_RING_SIZE)
152 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
153
154 #define TG3_DMA_BYTE_ENAB               64
155
156 #define TG3_RX_STD_DMA_SZ               1536
157 #define TG3_RX_JMB_DMA_SZ               9046
158
159 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
160
161 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
163
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
166
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
169
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171  * that are at least dword aligned when used in PCIX mode.  The driver
172  * works around this bug by double copying the packet.  This workaround
173  * is built into the normal double copy length check for efficiency.
174  *
175  * However, the double copy is only necessary on those architectures
176  * where unaligned memory accesses are inefficient.  For those architectures
177  * where unaligned memory accesses incur little penalty, we can reintegrate
178  * the 5701 in the normal rx path.  Doing so saves a device structure
179  * dereference by hardcoding the double copy threshold in place.
180  */
181 #define TG3_RX_COPY_THRESHOLD           256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
184 #else
185         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
186 #endif
187
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
190
191 #define TG3_RAW_IP_ALIGN 2
192
193 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
194
195 #define FIRMWARE_TG3            "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
198
199 static char version[] __devinitdata =
200         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
201
202 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
203 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
204 MODULE_LICENSE("GPL");
205 MODULE_VERSION(DRV_MODULE_VERSION);
206 MODULE_FIRMWARE(FIRMWARE_TG3);
207 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
209
210 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
211 module_param(tg3_debug, int, 0);
212 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
213
/* PCI vendor/device IDs this driver binds to.  The table must end with
 * the all-zero sentinel entry required by pci_register_driver() and is
 * exported for module autoloading via MODULE_DEVICE_TABLE below.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}      /* sentinel - must remain last */
};
297
298 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
299
/* Statistic names reported to "ethtool -S" (ETH_SS_STATS string set).
 * NOTE(review): the order here presumably must line up one-to-one with
 * the order the stats-gathering code (elsewhere in this file) fills the
 * u64 buffer -- do not reorder or insert entries without checking that
 * code.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
382
383 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
384
385
/* Self-test names reported to "ethtool -t" (ETH_SS_TEST string set).
 * NOTE(review): order presumably matches the result slots filled by the
 * self-test code elsewhere in this file -- verify before reordering.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
396
397 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
398
399
/* Direct MMIO register write (posted; no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
404
/* Direct MMIO register read. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
409
/* MMIO write into the APE register BAR (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
414
/* MMIO read from the APE register BAR (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
419
/* Write register @off via the PCI config-space indirection window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries
 * the value).  indirect_lock serializes users of the shared window;
 * irqsave makes this callable from any context.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
429
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
435
/* Read register @off via the PCI config-space indirection window.
 * Counterpart to tg3_write_indirect_reg32(); same locking rules.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
447
/* Write a mailbox register when the device is accessed indirectly
 * through PCI config space.  Two mailboxes (RX return consumer index
 * and RX std producer index) have dedicated config-space aliases and
 * bypass the shared indirection window entirely; everything else goes
 * through TG3PCI_REG_BASE_ADDR/REG_DATA (mailboxes live at register
 * offset + 0x5600) under indirect_lock.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
477
/* Read a mailbox register through the config-space indirection window
 * (mailbox space is at register offset + 0x5600).  Serialized by
 * indirect_lock like the other indirect accessors.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
489
490 /* usec_wait specifies the wait time in usec when writing to certain registers
491  * where it is unsafe to read back the register without some delay.
492  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
493  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
494  */
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods: tp->write32 is the indirect
                 * (config-space) path here, so no flush read is needed.
                 */
                tp->write32(tp, off, val);
        else {
                /* Posted method: write, delay, then read back to flush. */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
513
/* Mailbox write with an optional read-back flush.  The flush is skipped
 * when MBOX_WRITE_REORDER or ICH_WORKAROUND is set -- on those paths the
 * write method itself already guarantees ordering.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
520
/* TX mailbox write.  Writes the value twice when TXD_MBOX_HWBUG is set
 * (hardware-bug workaround) and reads it back when MBOX_WRITE_REORDER
 * is set, to keep the write ordered on chipsets that reorder MMIO.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}
530
/* 5906 mailbox read: mailboxes are offset by GRCMBOX_BASE on this chip. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
535
/* 5906 mailbox write: mailboxes are offset by GRCMBOX_BASE on this chip. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
540
541 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
542 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
543 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
544 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
545 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
546
547 #define tw32(reg, val)                  tp->write32(tp, reg, val)
548 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
549 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
550 #define tr32(reg)                       tp->read32(tp, reg)
551
/* Write @val to NIC SRAM location @off through the memory window.
 * On 5906 the stats-block SRAM range is not writable and is silently
 * skipped.  Uses config-space window registers when SRAM_USE_CONFIG is
 * set, otherwise the MMIO window registers; either way the window base
 * is restored to zero afterwards and indirect_lock serializes window
 * users.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
576
/* Read NIC SRAM location @off into *@val through the memory window.
 * Counterpart to tg3_write_mem(); on 5906 the stats-block SRAM range
 * is unreadable and yields 0.  Same window selection and locking rules
 * as tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
603
604 static void tg3_ape_lock_init(struct tg3 *tp)
605 {
606         int i;
607         u32 regbase;
608
609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
610                 regbase = TG3_APE_LOCK_GRANT;
611         else
612                 regbase = TG3_APE_PER_LOCK_GRANT;
613
614         /* Make sure the driver hasn't any stale locks. */
615         for (i = 0; i < 8; i++)
616                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
617 }
618
/* Acquire APE hardware semaphore @locknum, arbitrating access to shared
 * resources with the APE firmware.  No-op (success) when the APE is not
 * enabled.  Only the GRC and MEM locks are supported.  The request is
 * posted to the per-lock request register and the grant register is
 * polled for up to ~1 ms (100 x 10 us).
 *
 * Returns 0 on success, -EINVAL for an unsupported lock number, or
 * -EBUSY if the grant never arrived (in which case the request is
 * revoked by writing the grant register).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses a different lock register layout. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
666
667 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
668 {
669         u32 gnt;
670
671         if (!tg3_flag(tp, ENABLE_APE))
672                 return;
673
674         switch (locknum) {
675         case TG3_APE_LOCK_GRC:
676         case TG3_APE_LOCK_MEM:
677                 break;
678         default:
679                 return;
680         }
681
682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
683                 gnt = TG3_APE_LOCK_GRANT;
684         else
685                 gnt = TG3_APE_PER_LOCK_GRANT;
686
687         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
688 }
689
/* Mask the PCI interrupt at the host-control register and write 1 to
 * every napi vector's interrupt mailbox to deassert/disable it.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
699
/* Re-enable chip interrupts: clear irq_sync (the wmb() makes that
 * visible before the unmask below), unmask the PCI interrupt, and ack
 * each vector's mailbox with its last seen status tag.  May force an
 * initial interrupt so no pending status update is lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* NOTE(review): the duplicate mailbox write below looks
                 * like a 1-shot MSI requirement -- confirm against the
                 * chip errata before touching it.
                 */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
730
731 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
732 {
733         struct tg3 *tp = tnapi->tp;
734         struct tg3_hw_status *sblk = tnapi->hw_status;
735         unsigned int work_exists = 0;
736
737         /* check for phy events */
738         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
739                 if (sblk->status & SD_STATUS_LINK_CHG)
740                         work_exists = 1;
741         }
742         /* check for RX/TX work to do */
743         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
744             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
745                 work_exists = 1;
746
747         return work_exists;
748 }
749
750 /* tg3_int_reenable
751  *  similar to tg3_enable_ints, but it accurately determines whether there
752  *  is new work pending and can return without flushing the PIO write
753  *  which reenables interrupts
754  */
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        /* Ack with the last processed tag; mmiowb() orders the mailbox
         * write before any subsequent MMIO from another CPU.
         */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
770
/* Step the core clock down for power saving, preserving only the
 * CLKRUN-related bits and the low 5 bits of TG3PCI_CLOCK_CTRL.  Chips
 * with a CPMU or in the 5780 class manage clocks themselves and are
 * skipped.  Each write uses a 40 usec settle delay (tw32_wait_f); the
 * 44MHZ_CORE case deliberately steps through ALTCLK in two writes
 * before the final value -- presumably a required hardware sequence,
 * do not collapse the writes.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
803
804 #define PHY_BUSY_LOOPS  5000
805
/* Read PHY register @reg over the MAC's MII management interface into
 * @val.  Returns 0 on success or -EBUSY if the interface never went
 * idle.  Auto-polling is paused for the duration of the access and
 * restored afterwards.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with our manual MI_COM access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay to pick up
			 * the final data value.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
854
/* Write @val to PHY register @reg over the MAC's MII management
 * interface.  Returns 0 on success or -EBUSY on timeout.  Mirrors
 * tg3_readphy() in its handling of auto-polling.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs silently ignore writes to these registers; pretend
	 * success so callers need no special casing.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with our manual MI_COM access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
903
904 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
905 {
906         int err;
907
908         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
909         if (err)
910                 goto done;
911
912         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
913         if (err)
914                 goto done;
915
916         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
917                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
918         if (err)
919                 goto done;
920
921         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
922
923 done:
924         return err;
925 }
926
927 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
928 {
929         int err;
930
931         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
932         if (err)
933                 goto done;
934
935         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
936         if (err)
937                 goto done;
938
939         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
940                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
941         if (err)
942                 goto done;
943
944         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
945
946 done:
947         return err;
948 }
949
950 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
951 {
952         int err;
953
954         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
955         if (!err)
956                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
957
958         return err;
959 }
960
961 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
962 {
963         int err;
964
965         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
966         if (!err)
967                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
968
969         return err;
970 }
971
972 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
973 {
974         int err;
975
976         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
977                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
978                            MII_TG3_AUXCTL_SHDWSEL_MISC);
979         if (!err)
980                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
981
982         return err;
983 }
984
985 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
986 {
987         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
988                 set |= MII_TG3_AUXCTL_MISC_WREN;
989
990         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
991 }
992
/* Convenience wrappers to turn the SMDSP on/off via the AUXCTL shadow.
 * Neither macro ends in a semicolon: callers supply their own, so the
 * expansion stays safe inside unbraced if/else statements.  (The
 * DISABLE variant previously carried a stray trailing ';'.)
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1001
1002 static int tg3_bmcr_reset(struct tg3 *tp)
1003 {
1004         u32 phy_control;
1005         int limit, err;
1006
1007         /* OK, reset it, and poll the BMCR_RESET bit until it
1008          * clears or we time out.
1009          */
1010         phy_control = BMCR_RESET;
1011         err = tg3_writephy(tp, MII_BMCR, phy_control);
1012         if (err != 0)
1013                 return -EBUSY;
1014
1015         limit = 5000;
1016         while (limit--) {
1017                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1018                 if (err != 0)
1019                         return -EBUSY;
1020
1021                 if ((phy_control & BMCR_RESET) == 0) {
1022                         udelay(40);
1023                         break;
1024                 }
1025                 udelay(10);
1026         }
1027         if (limit < 0)
1028                 return -EBUSY;
1029
1030         return 0;
1031 }
1032
1033 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1034 {
1035         struct tg3 *tp = bp->priv;
1036         u32 val;
1037
1038         spin_lock_bh(&tp->lock);
1039
1040         if (tg3_readphy(tp, reg, &val))
1041                 val = -EIO;
1042
1043         spin_unlock_bh(&tp->lock);
1044
1045         return val;
1046 }
1047
1048 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1049 {
1050         struct tg3 *tp = bp->priv;
1051         u32 ret = 0;
1052
1053         spin_lock_bh(&tp->lock);
1054
1055         if (tg3_writephy(tp, reg, val))
1056                 ret = -EIO;
1057
1058         spin_unlock_bh(&tp->lock);
1059
1060         return ret;
1061 }
1062
/* mii_bus ->reset callback: no bus-level reset is performed; always
 * reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1067
/* Program the 5785 MAC's PHY configuration registers (LED modes, RGMII
 * inband-status masks, clock timeouts) to match the attached PHY model
 * and interface mode.  PHYs not listed below are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: only LED modes and clock timeouts apply. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with inband status: enable the full mode/quality masks. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Finally, mirror the inband-status choices into the external
	 * RGMII mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1148
/* Disable MI auto-polling and, if the mdio bus is already registered on
 * a 5785, reapply the PHY-specific MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1159
/* Determine the PHY address, and when phylib is in use, allocate and
 * register an mdio bus, probe the PHY and configure its dev_flags and
 * interface mode.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map the PHY address from the PCI function;
		 * serdes configurations sit 7 addresses higher.
		 */
		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Only phylib configurations need an mdio bus, and only once. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-model PHY quirks: interface mode and phylib dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1264
1265 static void tg3_mdio_fini(struct tg3 *tp)
1266 {
1267         if (tg3_flag(tp, MDIOBUS_INITED)) {
1268                 tg3_flag_clear(tp, MDIOBUS_INITED);
1269                 mdiobus_unregister(tp->mdio_bus);
1270                 mdiobus_free(tp->mdio_bus);
1271         }
1272 }
1273
1274 /* tp->lock is held. */
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	/* Raise the driver-event bit to signal firmware, and remember
	 * when we did so for tg3_wait_for_event_ack().
	 */
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1285
1286 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1287
1288 /* tp->lock is held. */
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert the remaining microseconds into 8us poll iterations. */
	delay_cnt = (delay_cnt >> 3) + 1;

	/* Poll until firmware clears the driver-event bit or we give up. */
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1314
1315 /* tp->lock is held. */
/* tp->lock is held.
 *
 * Push the current MII link state (BMCR/BMSR, advertisement, link
 * partner ability and, for copper, 1000T control/status) to firmware
 * through the shared-memory mailbox.  Only 5780-class devices running
 * ASF need this.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* 14 bytes of link data follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status -- copper PHYs only. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register, when readable. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1361
1362 static void tg3_link_report(struct tg3 *tp)
1363 {
1364         if (!netif_carrier_ok(tp->dev)) {
1365                 netif_info(tp, link, tp->dev, "Link is down\n");
1366                 tg3_ump_link_report(tp);
1367         } else if (netif_msg_link(tp)) {
1368                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1369                             (tp->link_config.active_speed == SPEED_1000 ?
1370                              1000 :
1371                              (tp->link_config.active_speed == SPEED_100 ?
1372                               100 : 10)),
1373                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1374                              "full" : "half"));
1375
1376                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1377                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1378                             "on" : "off",
1379                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1380                             "on" : "off");
1381
1382                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1383                         netdev_info(tp->dev, "EEE is %s\n",
1384                                     tp->setlpicnt ? "enabled" : "disabled");
1385
1386                 tg3_ump_link_report(tp);
1387         }
1388 }
1389
1390 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1391 {
1392         u16 miireg;
1393
1394         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1395                 miireg = ADVERTISE_PAUSE_CAP;
1396         else if (flow_ctrl & FLOW_CTRL_TX)
1397                 miireg = ADVERTISE_PAUSE_ASYM;
1398         else if (flow_ctrl & FLOW_CTRL_RX)
1399                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1400         else
1401                 miireg = 0;
1402
1403         return miireg;
1404 }
1405
1406 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1407 {
1408         u16 miireg;
1409
1410         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1411                 miireg = ADVERTISE_1000XPAUSE;
1412         else if (flow_ctrl & FLOW_CTRL_TX)
1413                 miireg = ADVERTISE_1000XPSE_ASYM;
1414         else if (flow_ctrl & FLOW_CTRL_RX)
1415                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1416         else
1417                 miireg = 0;
1418
1419         return miireg;
1420 }
1421
1422 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1423 {
1424         u8 cap = 0;
1425
1426         if (lcladv & ADVERTISE_1000XPAUSE) {
1427                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1428                         if (rmtadv & LPA_1000XPAUSE)
1429                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1430                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1431                                 cap = FLOW_CTRL_RX;
1432                 } else {
1433                         if (rmtadv & LPA_1000XPAUSE)
1434                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1435                 }
1436         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1437                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1438                         cap = FLOW_CTRL_TX;
1439         }
1440
1441         return cap;
1442 }
1443
/* Resolve the active flow-control setting from autoneg results (or the
 * forced configuration) and program the MAC RX/TX mode registers
 * accordingly, touching hardware only when the mode actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* When phylib drives the PHY, its autoneg state is authoritative. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use the 1000X resolution table; copper
		 * uses the generic full-duplex MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1482
/* phylib link-change callback: sync the MAC mode, MI status, TX slot
 * times and flow control with the PHY's current link parameters, and
 * emit a link message when anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather both sides' pause bits so
			 * flow control can be resolved below.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a longer slot time than the default. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when the link state actually changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1566
/* Reset the PHY, attach it to the MAC through phylib and mask its
 * supported/advertised features down to what the MAC can do.  Returns
 * 0 on success (including when already connected) or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1614
/* Start (or resume) the phylib state machine.  When coming out of low
 * power, first restore the link settings saved at suspend time.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1636
/* Stop the phylib state machine for the attached PHY, if any. */
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}
1644
1645 static void tg3_phy_fini(struct tg3 *tp)
1646 {
1647         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1648                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1649                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1650         }
1651 }
1652
/* Enable/disable Auto Power-Down (APD) on FET-style PHYs.  The APD
 * bit lives in shadow register AUXSTAT2, which is only reachable
 * while MII_TG3_FET_SHADOW_EN is set in the FET TEST register.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Open the shadow register window. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Close the window by restoring the TEST register. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
1672
/* Enable or disable the PHY's Auto Power-Down (APD) feature. */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* APD is only configured on 5705+ devices; skip 5717+ parts
	 * using an MII serdes PHY.
	 */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		/* FET PHYs use a different (shadow register) mechanism. */
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program misc shadow scratch register 5.  DLLAPD is left out
	 * only on 5784 when enabling APD.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	/* Select the APD shadow register, set the 84ms wake timer, and
	 * set the enable bit only when turning APD on.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1707
/* Enable or disable automatic MDI crossover (auto-MDIX).  FET-style
 * PHYs use the MISCCTRL shadow register behind the FET TEST window;
 * other PHYs use the aux control MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	/* Not applicable to pre-5705 chips or serdes-attached PHYs. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow register window. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore TEST register to close the window. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write of the aux control MISC shadow. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
1748
1749 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1750 {
1751         int ret;
1752         u32 val;
1753
1754         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1755                 return;
1756
1757         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1758         if (!ret)
1759                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1760                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1761 }
1762
/* Apply the PHY tuning values stored in the chip's one-time
 * programmable (OTP) word to the PHY DSP registers.  No-op when no
 * OTP value was recorded (tp->phy_otp == 0).
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes require SMDSP access; bail out if unavailable. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* Each OTP field is shifted into place and written to its
	 * corresponding DSP register.
	 */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Release SMDSP access acquired above. */
	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
1799
/* Update Energy Efficient Ethernet state after a link change.  When
 * an autonegotiated full-duplex 100/1000 link is up, program the LPI
 * exit timer and, if the link partner advertised EEE, arm setlpicnt
 * so LPI is enabled later; otherwise disable LPI immediately.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status to see if
		 * the link partner resolved EEE support.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	/* No EEE-capable link: make sure LPI is off. */
	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1836
/* Enable Low Power Idle (EEE) in the CPMU.  On 5717/5719/57765
 * gigabit links an extra PHY DSP write is needed first.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	/* TG3_PHY_AUXCTL_SMDSP_ENABLE() is evaluated last in the
	 * condition so the DSP write only happens once SMDSP access
	 * has actually been granted.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
1853
1854 static int tg3_wait_macro_done(struct tg3 *tp)
1855 {
1856         int limit = 100;
1857
1858         while (limit--) {
1859                 u32 tmp32;
1860
1861                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1862                         if ((tmp32 & 0x1000) == 0)
1863                                 break;
1864                 }
1865         }
1866         if (limit < 0)
1867                 return -EBUSY;
1868
1869         return 0;
1870 }
1871
/* Write a known test pattern to all four DSP TAP channels and read
 * it back to verify the PHY DSP is stable.  On a macro timeout or a
 * readback mismatch, *resetp is set so the caller resets the PHY and
 * retries.  Returns 0 on success, -EBUSY on failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per channel: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's TAP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare as (low, high) pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the recovery writes
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1937
1938 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1939 {
1940         int chan;
1941
1942         for (chan = 0; chan < 4; chan++) {
1943                 int i;
1944
1945                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1946                              (chan * 0x2000) | 0x0200);
1947                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1948                 for (i = 0; i < 6; i++)
1949                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1950                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1951                 if (tg3_wait_macro_done(tp))
1952                         return -EBUSY;
1953         }
1954
1955         return 0;
1956 }
1957
1958 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1959 {
1960         u32 reg32, phy9_orig;
1961         int retries, do_phy_reset, err;
1962
1963         retries = 10;
1964         do_phy_reset = 1;
1965         do {
1966                 if (do_phy_reset) {
1967                         err = tg3_bmcr_reset(tp);
1968                         if (err)
1969                                 return err;
1970                         do_phy_reset = 0;
1971                 }
1972
1973                 /* Disable transmitter and interrupt.  */
1974                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1975                         continue;
1976
1977                 reg32 |= 0x3000;
1978                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1979
1980                 /* Set full-duplex, 1000 mbps.  */
1981                 tg3_writephy(tp, MII_BMCR,
1982                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1983
1984                 /* Set to master mode.  */
1985                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1986                         continue;
1987
1988                 tg3_writephy(tp, MII_TG3_CTRL,
1989                              (MII_TG3_CTRL_AS_MASTER |
1990                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1991
1992                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1993                 if (err)
1994                         return err;
1995
1996                 /* Block the PHY control access.  */
1997                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1998
1999                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2000                 if (!err)
2001                         break;
2002         } while (--retries);
2003
2004         err = tg3_phy_reset_chanpat(tp);
2005         if (err)
2006                 return err;
2007
2008         tg3_phydsp_write(tp, 0x8005, 0x0000);
2009
2010         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2011         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2012
2013         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2014
2015         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2016
2017         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2018                 reg32 &= ~0x3000;
2019                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2020         } else if (!err)
2021                 err = -EBUSY;
2022
2023         return err;
2024 }
2025
/* Reset the tigon3 PHY and reapply all chip-specific workarounds.
 * NOTE(review): the historical comment here mentioned a FORCE
 * argument that does not exist; the PHY is always reset.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the embedded PHY out of IDDQ power-down first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice; link status is a latched bit. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report link down before resetting the PHY. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the test-pattern reset procedure. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily clear GPHY_10MB_RXONLY around the reset. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock setting if it is active. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* Remaining fixups do not apply to 5717+ MII serdes PHYs. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* PHY-bug workarounds keyed off flags set at probe time. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2166
/* Configure the GPIO-controlled auxiliary power source (Vaux).
 * Vaux is kept up when this function -- or the peer function on a
 * dual-port board -- needs power while down (WOL or ASF enabled);
 * otherwise the GPIOs are returned to the non-Vaux state.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* On dual-port chips, check whether the peer function still
	 * needs power before touching the shared GPIOs.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer is fully up; leave the GPIOs alone. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Vaux not needed.
		 * NOTE(review): exact GPIO meanings are board-specific;
		 * confirm against the board design before changing this
		 * three-step sequence.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2283
2284 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2285 {
2286         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2287                 return 1;
2288         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2289                 if (speed != SPEED_10)
2290                         return 1;
2291         } else if (speed == SPEED_10)
2292                 return 1;
2293
2294         return 0;
2295 }
2296
2297 static int tg3_setup_phy(struct tg3 *, int);
2298
2299 #define RESET_KIND_SHUTDOWN     0
2300 #define RESET_KIND_INIT         1
2301 #define RESET_KIND_SUSPEND      2
2302
2303 static void tg3_write_sig_post_reset(struct tg3 *, int);
2304 static int tg3_halt_cpu(struct tg3 *, u32);
2305
/* Prepare the PHY for device power-down.  Serdes, 5906 and FET PHYs
 * each get their own quiesce sequence; for other PHYs, optionally
 * program low-power aux control settings, then assert BMCR_PDOWN
 * unless the chip is on the known power-down-bug list.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the serdes in soft reset with hardware
			 * autoneg selected.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Clear advertisements and restart autoneg. */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set standby power-down (SBPD) via the shadow
			 * AUXMODE4 register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Force the 12.5MHz MAC clock before powering down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2378
2379 /* tp->lock is held. */
2380 static int tg3_nvram_lock(struct tg3 *tp)
2381 {
2382         if (tg3_flag(tp, NVRAM)) {
2383                 int i;
2384
2385                 if (tp->nvram_lock_cnt == 0) {
2386                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2387                         for (i = 0; i < 8000; i++) {
2388                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2389                                         break;
2390                                 udelay(20);
2391                         }
2392                         if (i == 8000) {
2393                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2394                                 return -ENODEV;
2395                         }
2396                 }
2397                 tp->nvram_lock_cnt++;
2398         }
2399         return 0;
2400 }
2401
2402 /* tp->lock is held. */
2403 static void tg3_nvram_unlock(struct tg3 *tp)
2404 {
2405         if (tg3_flag(tp, NVRAM)) {
2406                 if (tp->nvram_lock_cnt > 0)
2407                         tp->nvram_lock_cnt--;
2408                 if (tp->nvram_lock_cnt == 0)
2409                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2410         }
2411 }
2412
2413 /* tp->lock is held. */
2414 static void tg3_enable_nvram_access(struct tg3 *tp)
2415 {
2416         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2417                 u32 nvaccess = tr32(NVRAM_ACCESS);
2418
2419                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2420         }
2421 }
2422
2423 /* tp->lock is held. */
2424 static void tg3_disable_nvram_access(struct tg3 *tp)
2425 {
2426         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2427                 u32 nvaccess = tr32(NVRAM_ACCESS);
2428
2429                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2430         }
2431 }
2432
/* Read one 32-bit word from the legacy EEPROM interface (parts without
 * the NVRAM block).  @offset must be 4-byte aligned and within the
 * EEPROM address range.  Returns 0 on success, -EINVAL on a bad
 * offset, or -EBUSY if the EEPROM never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id and the
	 * read-command bit before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2472
/* Maximum number of 10us polls to wait for an NVRAM command. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for the DONE bit.
 * Returns 0 on completion or -EBUSY on timeout (~100ms worst case).
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Extra settle time after DONE is observed. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2493
2494 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2495 {
2496         if (tg3_flag(tp, NVRAM) &&
2497             tg3_flag(tp, NVRAM_BUFFERED) &&
2498             tg3_flag(tp, FLASH) &&
2499             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2500             (tp->nvram_jedecnum == JEDEC_ATMEL))
2501
2502                 addr = ((addr / tp->nvram_pagesize) <<
2503                         ATMEL_AT45DB0X1B_PAGE_POS) +
2504                        (addr % tp->nvram_pagesize);
2505
2506         return addr;
2507 }
2508
2509 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2510 {
2511         if (tg3_flag(tp, NVRAM) &&
2512             tg3_flag(tp, NVRAM_BUFFERED) &&
2513             tg3_flag(tp, FLASH) &&
2514             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2515             (tp->nvram_jedecnum == JEDEC_ATMEL))
2516
2517                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2518                         tp->nvram_pagesize) +
2519                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2520
2521         return addr;
2522 }
2523
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without the NVRAM block use the legacy EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate with firmware for the NVRAM interface before
	 * enabling host access; released in reverse order below.
	 */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2561
2562 /* Ensures NVRAM data is in bytestream format. */
2563 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2564 {
2565         u32 v;
2566         int res = tg3_nvram_read(tp, offset, &v);
2567         if (!res)
2568                 *val = cpu_to_be32(v);
2569         return res;
2570 }
2571
/* Program the device MAC address registers from tp->dev->dev_addr.
 * The address is split into a 16-bit high half (bytes 0-1) and a
 * 32-bit low half (bytes 2-5) and written to all four MAC address
 * slots; slot 1 is skipped when @skip_mac_1 is set (presumably left
 * for firmware/WoL use - verify against callers).  tp->lock is held.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 have twelve additional extended address slots;
	 * fill them all with the same address.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff random generator from the byte sum of
	 * the MAC address so different NICs back off differently.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
2608
/* Rewrite TG3PCI_MISC_HOST_CTRL with the cached value so that
 * register accesses work after a power-state transition.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2618
/* Bring the device to full power (D0) and restore register access.
 * Always returns 0.
 */
static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2631
/* Quiesce the chip and PHY in preparation for entering a low-power
 * state: save link parameters, reconfigure advertisement for Wake-on-LAN
 * if armed, set up the MAC for magic-packet reception, and gate clocks /
 * power down the PHY as the chip family allows.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is quiesced. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* WoL is armed only when both userspace policy and the WOL_ENABLE
	 * flag agree.
	 */
	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save current link parameters so resume can
			 * restore them.
			 */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only low-speed modes to save power;
			 * widen if ASF or WoL needs a faster link.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Some Broadcom PHY families still need the legacy
			 * low-power sequence even under phylib.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Drop copper links to 10/half for minimum power. */
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox magic before
		 * touching WoL state.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				/* Put the PHY into its WoL power state. */
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				/* 5700 link polarity depends on the WoL
				 * link speed.
				 */
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating / PLL power-down, selected per chip family. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch, 40us settle after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* PHY can be fully powered down only if nothing needs the link. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; unlock only
			 * if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2877
/* Fully power the device down: quiesce it, arm PCI wake if WoL is
 * enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
2885
2886 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2887 {
2888         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2889         case MII_TG3_AUX_STAT_10HALF:
2890                 *speed = SPEED_10;
2891                 *duplex = DUPLEX_HALF;
2892                 break;
2893
2894         case MII_TG3_AUX_STAT_10FULL:
2895                 *speed = SPEED_10;
2896                 *duplex = DUPLEX_FULL;
2897                 break;
2898
2899         case MII_TG3_AUX_STAT_100HALF:
2900                 *speed = SPEED_100;
2901                 *duplex = DUPLEX_HALF;
2902                 break;
2903
2904         case MII_TG3_AUX_STAT_100FULL:
2905                 *speed = SPEED_100;
2906                 *duplex = DUPLEX_FULL;
2907                 break;
2908
2909         case MII_TG3_AUX_STAT_1000HALF:
2910                 *speed = SPEED_1000;
2911                 *duplex = DUPLEX_HALF;
2912                 break;
2913
2914         case MII_TG3_AUX_STAT_1000FULL:
2915                 *speed = SPEED_1000;
2916                 *duplex = DUPLEX_FULL;
2917                 break;
2918
2919         default:
2920                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2921                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2922                                  SPEED_10;
2923                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2924                                   DUPLEX_HALF;
2925                         break;
2926                 }
2927                 *speed = SPEED_INVALID;
2928                 *duplex = DUPLEX_INVALID;
2929                 break;
2930         }
2931 }
2932
/* Program the PHY autoneg advertisement registers from an ethtool
 * ADVERTISED_* mask (@advertise) and a FLOW_CTRL_* mask (@flowctrl),
 * then configure EEE advertisement on EEE-capable PHYs.  Returns 0 on
 * success or the first PHY access error encountered.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement register. */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Build the 1000BASE-T control register. */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= MII_TG3_CTRL_ADV_1000_FULL;

	/* 5701 A0/B0 workaround: force master mode. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= (MII_TG3_CTRL_AS_MASTER |
			    MII_TG3_CTRL_ENABLE_AS_MASTER);

	err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI requests while reprogramming EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		/* Per-ASIC DSP tweaks required for EEE operation. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		/* Always disable SMDSP access again; report whichever
		 * error happened first.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3013
/* Begin copper link bring-up: choose an advertisement mask based on
 * power state and link_config, program it via tg3_phy_autoneg_cfg(),
 * then either force a fixed speed/duplex (autoneg disabled) or restart
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power: advertise only the slow modes needed for
		 * WoL (100Mb added when WOL_SPEED_100MB is set).
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise everything configured,
		 * minus gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback first and wait (up to
			 * 1500 * 10us) for link-down before forcing the new
			 * mode.  BMSR is read twice because link status is
			 * latched.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3107
/* Apply the BCM5401 PHY DSP initialization sequence.  The register
 * values are vendor-specified magic (presumably from Broadcom errata -
 * do not alter).  Returns 0 on success, non-zero if any write failed.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	/* Let the DSP settle before further PHY traffic. */
	udelay(40);

	return err;
}
3126
3127 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3128 {
3129         u32 adv_reg, all_mask = 0;
3130
3131         if (mask & ADVERTISED_10baseT_Half)
3132                 all_mask |= ADVERTISE_10HALF;
3133         if (mask & ADVERTISED_10baseT_Full)
3134                 all_mask |= ADVERTISE_10FULL;
3135         if (mask & ADVERTISED_100baseT_Half)
3136                 all_mask |= ADVERTISE_100HALF;
3137         if (mask & ADVERTISED_100baseT_Full)
3138                 all_mask |= ADVERTISE_100FULL;
3139
3140         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3141                 return 0;
3142
3143         if ((adv_reg & all_mask) != all_mask)
3144                 return 0;
3145         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3146                 u32 tg3_ctrl;
3147
3148                 all_mask = 0;
3149                 if (mask & ADVERTISED_1000baseT_Half)
3150                         all_mask |= ADVERTISE_1000HALF;
3151                 if (mask & ADVERTISED_1000baseT_Full)
3152                         all_mask |= ADVERTISE_1000FULL;
3153
3154                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3155                         return 0;
3156
3157                 if ((tg3_ctrl & all_mask) != all_mask)
3158                         return 0;
3159         }
3160         return 1;
3161 }
3162
/* Verify that the advertised flow-control bits match what link_config
 * requests.  On full duplex, a mismatch returns 0 (renegotiation
 * needed) and the partner's bits are fetched into *rmtadv when pause
 * autoneg is on.  On half duplex, a mismatch is fixed up in the
 * advertisement register for the next negotiation but still counts as
 * ok.  Returns 1 when no renegotiation is needed.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	/* A failed read is treated as "ok" so the caller proceeds. */
	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
3195
/* Bring up (or re-validate) the link on a copper PHY and program the MAC
 * to match the resulting speed/duplex.
 *
 * @force_reset: non-zero forces a PHY reset before link evaluation; the
 *               function may also self-set it for chips whose third-party
 *               PHYs need a reset on link loss.
 *
 * Returns 0 on normal completion; errors from the 5401 DSP init path
 * are returned directly.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config-change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Disable MI auto-polling while we access the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice: link-status is a latched bit, the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Poll up to ~10ms for the link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a second reset
			 * plus DSP reload before link comes up.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* If bit 10 of MISCTEST is not yet set, set it and go
		 * straight to renegotiation.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link-up (double read: latched link-status bit). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux-status word to appear. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR, skipping 0 and 0x7fff (presumably bad
		 * reads -- TODO confirm against PHY errata).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg: link counts as up only if the PHY is
			 * advertising everything we want and flow control
			 * advertisement is consistent.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link is good only if the PHY result
			 * exactly matches the requested configuration.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		/* Kick off (re)negotiation, then re-check link state. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X/high-speed PCI: re-ack status and
	 * poke the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* CLKREQ must be disabled at 10/100, enabled otherwise. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate any carrier change to the stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3474
/* Per-call state for the software fiber autonegotiation state machine
 * (driven by tg3_fiber_aneg_smachine()).  The caller allocates one of
 * these, zeroes it, and carries it across successive invocations.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current state, one of ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* control and result bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (cur_time increments once
	 * per smachine invocation).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last RX config word compared */
	int ability_match_count;	/* consecutive identical RX words */

	/* Match flags set while sampling the received config word. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw TX/RX autoneg config words */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle time, measured in cur_time ticks of the state machine. */
#define ANEG_STATE_SETTLE_TIME  10000
3538
/* Advance the software fiber autonegotiation state machine by one step.
 *
 * @tp: device state
 * @ap: autoneg state carried across successive calls
 *
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep ticking, timer
 * armed), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First call: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the match trackers. */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* The same word seen more than once in a row
			 * counts as a stable ability match.
			 */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold here until the settle time has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero partner config word. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge by re-sending our word with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner's word (ignoring ACK) must still match
			 * what we matched on; otherwise start over.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Translate the partner's config word into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next page requested but not supported
				 * on our side: fail the negotiation.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and watch for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3790
3791 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3792 {
3793         int res = 0;
3794         struct tg3_fiber_aneginfo aninfo;
3795         int status = ANEG_FAILED;
3796         unsigned int tick;
3797         u32 tmp;
3798
3799         tw32_f(MAC_TX_AUTO_NEG, 0);
3800
3801         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3802         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3803         udelay(40);
3804
3805         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3806         udelay(40);
3807
3808         memset(&aninfo, 0, sizeof(aninfo));
3809         aninfo.flags |= MR_AN_ENABLE;
3810         aninfo.state = ANEG_STATE_UNKNOWN;
3811         aninfo.cur_time = 0;
3812         tick = 0;
3813         while (++tick < 195000) {
3814                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3815                 if (status == ANEG_DONE || status == ANEG_FAILED)
3816                         break;
3817
3818                 udelay(1);
3819         }
3820
3821         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3822         tw32_f(MAC_MODE, tp->mac_mode);
3823         udelay(40);
3824
3825         *txflags = aninfo.txconfig;
3826         *rxflags = aninfo.flags;
3827
3828         if (status == ANEG_DONE &&
3829             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3830                              MR_LP_ADV_FULL_DUPLEX)))
3831                 res = 1;
3832
3833         return res;
3834 }
3835
/* Initialize the BCM8002 SerDes PHY.  The register/value pairs below
 * are opaque vendor magic; the write order and the delays appear to be
 * significant, so do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3885
3886 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3887 {
3888         u16 flowctrl;
3889         u32 sg_dig_ctrl, sg_dig_status;
3890         u32 serdes_cfg, expected_sg_dig_ctrl;
3891         int workaround, port_a;
3892         int current_link_up;
3893
3894         serdes_cfg = 0;
3895         expected_sg_dig_ctrl = 0;
3896         workaround = 0;
3897         port_a = 1;
3898         current_link_up = 0;
3899
3900         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3901             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3902                 workaround = 1;
3903                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3904                         port_a = 0;
3905
3906                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3907                 /* preserve bits 20-23 for voltage regulator */
3908                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3909         }
3910
3911         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3912
3913         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3914                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3915                         if (workaround) {
3916                                 u32 val = serdes_cfg;
3917
3918                                 if (port_a)
3919                                         val |= 0xc010000;
3920                                 else
3921                                         val |= 0x4010000;
3922                                 tw32_f(MAC_SERDES_CFG, val);
3923                         }
3924
3925                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3926                 }
3927                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3928                         tg3_setup_flow_control(tp, 0, 0);
3929                         current_link_up = 1;
3930                 }
3931                 goto out;
3932         }
3933
3934         /* Want auto-negotiation.  */
3935         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3936
3937         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3938         if (flowctrl & ADVERTISE_1000XPAUSE)
3939                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3940         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3941                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3942
3943         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3944                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3945                     tp->serdes_counter &&
3946                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3947                                     MAC_STATUS_RCVD_CFG)) ==
3948                      MAC_STATUS_PCS_SYNCED)) {
3949                         tp->serdes_counter--;
3950                         current_link_up = 1;
3951                         goto out;
3952                 }
3953 restart_autoneg:
3954                 if (workaround)
3955                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3956                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3957                 udelay(5);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3959
3960                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3961                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3962         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3963                                  MAC_STATUS_SIGNAL_DET)) {
3964                 sg_dig_status = tr32(SG_DIG_STATUS);
3965                 mac_status = tr32(MAC_STATUS);
3966
3967                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3968                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3969                         u32 local_adv = 0, remote_adv = 0;
3970
3971                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3972                                 local_adv |= ADVERTISE_1000XPAUSE;
3973                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3974                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3975
3976                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3977                                 remote_adv |= LPA_1000XPAUSE;
3978                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3979                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3980
3981                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3982                         current_link_up = 1;
3983                         tp->serdes_counter = 0;
3984                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3985                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3986                         if (tp->serdes_counter)
3987                                 tp->serdes_counter--;
3988                         else {
3989                                 if (workaround) {
3990                                         u32 val = serdes_cfg;
3991
3992                                         if (port_a)
3993                                                 val |= 0xc010000;
3994                                         else
3995                                                 val |= 0x4010000;
3996
3997                                         tw32_f(MAC_SERDES_CFG, val);
3998                                 }
3999
4000                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4001                                 udelay(40);
4002
4003                                 /* Link parallel detection - link is up */
4004                                 /* only if we have PCS_SYNC and not */
4005                                 /* receiving config code words */
4006                                 mac_status = tr32(MAC_STATUS);
4007                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4008                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4009                                         tg3_setup_flow_control(tp, 0, 0);
4010                                         current_link_up = 1;
4011                                         tp->phy_flags |=
4012                                                 TG3_PHYFLG_PARALLEL_DETECT;
4013                                         tp->serdes_counter =
4014                                                 SERDES_PARALLEL_DET_TIMEOUT;
4015                                 } else
4016                                         goto restart_autoneg;
4017                         }
4018                 }
4019         } else {
4020                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4021                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4022         }
4023
4024 out:
4025         return current_link_up;
4026 }
4027
/* Bring up a fiber (SerDes/TBI) link using the software autoneg state
 * machine instead of the MAC's hardware autoneg engine.
 *
 * @tp:         device private state
 * @mac_status: snapshot of the MAC_STATUS register
 *
 * Returns 1 if the link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all; report link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		/* Run the software 1000BASE-X autoneg state machine; on
		 * success it reports the tx/rx config words exchanged.
		 */
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the ANEG config bits into MII-style
			 * 1000BASE-X pause advertisements so the common
			 * flow-control resolver can be reused.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config change bits and wait (up to 30
		 * passes, ~60us each) for them to stop re-asserting.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but if we have PCS sync and are not
		 * receiving config code words, the partner is likely a
		 * forced-mode link: declare link up by parallel detect.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS so the partner sees our config
		 * words once, then return to normal operation.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4089
/* Configure and (re)establish the link on a fiber/TBI PHY.
 *
 * Dispatches to the hardware-autoneg or by-hand link bring-up,
 * updates carrier state and the link LED, and reports link changes.
 *
 * @tp:          device private state
 * @force_reset: unused here (kept for signature parity with the
 *               copper/MII variants called from tg3_setup_phy())
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can report a
	 * change even when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and stable (PCS synced, signal
	 * detected, no pending config words) - just ack the change
	 * bits and leave everything alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block while
	 * keeping SD_STATUS_UPDATED asserted.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change bits until they stay clear (bounded
	 * to 100 iterations, 5us apart).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * prod the link partner into restarting negotiation.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the link LED
	 * accordingly.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes to the stack; otherwise report
	 * only if speed/duplex/flow-control actually changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4197
/* Configure and (re)establish the link on a SerDes PHY that is driven
 * through the MII register interface (e.g. 5714S-class parts).
 *
 * Handles three cases: autoneg already in parallel-detect mode (just
 * check link), autoneg enabled (program advertisement and restart AN
 * if needed), and forced mode (program BMCR, forcing a link-down
 * first if the link was up).
 *
 * @tp:          device private state
 * @force_reset: nonzero to hard-reset the PHY before configuring
 *
 * Returns 0 on success or an accumulated MII access error code.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice: link-status is latched-low, so the first
	 * read clears a stale latch and the second reflects reality.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 the BMSR link bit is unreliable; trust the MAC's TX
	 * status link indication instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * requested link config and flow-control settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg only if the advertisement changed
		 * or autoneg was not already enabled; otherwise fall
		 * through and evaluate the current link state.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the partner drops the link before
				 * we force the new settings.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read BMSR twice (latched-low link bit). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common denominator of
			 * both sides' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this reads active_duplex before it is updated
	 * from current_duplex below, i.e. mac_mode reflects the
	 * previous link's duplex - confirm this ordering is intended.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4369
/* Periodic SerDes parallel-detection helper (called from the timer
 * path after autoneg has had time to run).
 *
 * If autoneg failed but we see signal without config code words, force
 * a 1000FD link via parallel detection.  Conversely, if a link brought
 * up by parallel detection starts receiving config code words, fall
 * back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice: the first read clears latched
			 * status, the second gives the live value.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4429
/* Top-level PHY/link setup entry point.
 *
 * Dispatches to the fiber, fiber-MII or copper setup routine based on
 * the PHY flags, then applies post-link MAC tuning: 5784_AX clock
 * prescaler, TX inter-packet gap / slot time, statistics coalescing,
 * and the ASPM power-management threshold workaround.
 *
 * @tp:          device private state
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * Returns the PHY setup routine's error code (0 on success).
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* 5784_AX: derive the GRC prescaler from the current MAC
	 * clock frequency reported by the CPMU.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs an extended slot time (0xff). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: only coalesce statistics while the link is
	 * up; 0 disables the stat ticks.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: use the low L1 threshold only while the
	 * link is down; saturate the mask while up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4494
/* Returns nonzero while interrupts are being synchronized/disabled,
 * so polling paths can avoid racing with the interrupt handler.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4499
4500 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4501 {
4502         int i;
4503
4504         dst = (u32 *)((u8 *)dst + off);
4505         for (i = 0; i < len; i += sizeof(u32))
4506                 *dst++ = tr32(off + i);
4507 }
4508
/* Snapshot the legacy (non-PCIe direct-map) register blocks into
 * @regs for tg3_dump_state().  tg3_rd32_loop() offsets the destination
 * by the register address, so each value is stored at the buffer
 * offset matching its register offset.  Conditional blocks are read
 * only when the corresponding hardware feature exists.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The TX CPU was removed on 5705 and later parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4558
/* Dump device state to the kernel log for debugging: a register
 * snapshot (non-zero groups only) followed by the hardware and NAPI
 * status for every interrupt vector.  Uses GFP_ATOMIC as this may be
 * called from non-sleeping contexts.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups
	 * (kzalloc guarantees unread slots are zero).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4616
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set or
	 * indirect mailbox writes in use) the symptom should be
	 * impossible - treat it as a fatal driver bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under the lock; the actual chip
	 * reset happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4638
4639 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4640 {
4641         /* Tell compiler to fetch tx indices from memory. */
4642         barrier();
4643         return tnapi->tx_pending -
4644                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4645 }
4646
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
/* Reclaim completed TX descriptors for one NAPI vector: walk the ring
 * from the software consumer to the hardware consumer, unmap and free
 * each skb, then wake the TX queue if it was stopped and enough space
 * has been freed.  Calls tg3_tx_recover() if ring state looks corrupt.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, tx rings map to napi[1..]; shift the queue index
	 * down by one - presumably vector 0 carries no TX ring then.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware
		 * reported a bogus consumer index - trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment, which occupies its own slot.
		 * A non-NULL skb in a frag slot, or running into the
		 * hardware index mid-packet, indicates corruption.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a
	 * concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4721
4722 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4723 {
4724         if (!ri->skb)
4725                 return;
4726
4727         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4728                          map_sz, PCI_DMA_FROMDEVICE);
4729         dev_kfree_skb_any(ri->skb);
4730         ri->skb = NULL;
4731 }
4732
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, bookkeeping entry, and buffer size for
	 * the ring named by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit: record the skb and DMA address, then expose the
	 * buffer address to the chip through the descriptor.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4799
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source buffers always come from vector 0's producer set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring cookie; nothing to recycle. */
		return;
	}

	/* Transfer the skb, DMA bookkeeping, and BD address to the
	 * destination slot.
	 */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
4849
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring
		 * (standard or jumbo) the buffer came from and its
		 * index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;	/* unknown ring cookie */

		work_mask |= opaque_key;

		/* On an RX error (other than the odd-nibble MII
		 * indication) recycle the buffer back to its ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		/* Hardware-reported length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large packet: hand the DMA buffer itself to
			 * the stack and post a freshly allocated one.
			 */
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy the data out and recycle
			 * the original DMA buffer back to the ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only for TCP/UDP frames
		 * whose computed checksum field is 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the standard producer index so
		 * the chip does not run out of buffers mid-poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Non-RSS: write the producer mailboxes directly for
		 * whichever rings saw activity.
		 */
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] transfers refilled buffers back to
		 * the hardware rings (see tg3_poll_work()).
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5056
/* Service a link-change indication from the status block: ack the MAC
 * status bits when phylib owns the link, otherwise run the driver's
 * own PHY setup.  Skipped entirely when link state is obtained via
 * the link-change register or serdes polling.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the status block
			 * before servicing the event.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5080
/* Transfer refilled RX buffers from source producer ring set @spr to
 * destination set @dpr (callers pass vector 0's set as @dpr).  The
 * standard and jumbo rings are handled in two analogous passes.
 * Returns 0 on success, or -ENOSPC if an occupied destination slot
 * limited the transfer (remaining buffers stay in @spr for a retry).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of contiguous source entries, accounting for
		 * a producer index that has wrapped past the consumer.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also cap at the space left before the destination
		 * ring wraps.
		 */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding an
		 * skb and report -ENOSPC so the caller can retry.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Copy the bookkeeping entries, then the BD addresses. */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same algorithm for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5206
/* Do one pass of NAPI work for @tnapi: reap completed TX, receive
 * within the remaining budget, and (for RSS, on vector 1 only)
 * redistribute refilled RX buffers from the per-vector rings back to
 * vector 0's hardware rings.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() may have detected ring corruption and
		 * scheduled a reset; bail out early if so.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Gather refilled buffers from every RX vector into
		 * vector 0's producer rings.
		 */
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		/* Only poke the mailboxes whose producer index moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failure (-ENOSPC) leaves buffers stranded
		 * in the per-vector rings; kick the coalescing engine,
		 * presumably to trigger another pass — confirm.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5253
/* NAPI poll handler for the non-zero MSI-X vectors (registered by
 * tg3_napi_init()).  Unlike tg3_poll(), it does no link-change or
 * error processing.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, NAPI will call us
		 * again without re-enabling the interrupt.
		 */
		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5297
5298 static void tg3_process_error(struct tg3 *tp)
5299 {
5300         u32 val;
5301         bool real_error = false;
5302
5303         if (tg3_flag(tp, ERROR_PROCESSED))
5304                 return;
5305
5306         /* Check Flow Attention register */
5307         val = tr32(HOSTCC_FLOW_ATTN);
5308         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5309                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5310                 real_error = true;
5311         }
5312
5313         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5314                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5315                 real_error = true;
5316         }
5317
5318         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5319                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5320                 real_error = true;
5321         }
5322
5323         if (!real_error)
5324                 return;
5325
5326         tg3_dump_state(tp);
5327
5328         tg3_flag_set(tp, ERROR_PROCESSED);
5329         schedule_work(&tp->reset_task);
5330 }
5331
/* NAPI poll handler registered for tp->napi[0] (see tg3_napi_init()).
 * In addition to TX/RX work it handles chip error indications and
 * link changes, then re-enables interrupts once no work remains.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled without re-enabling
		 * interrupts; NAPI will poll again.
		 */
		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5379
5380 static void tg3_napi_disable(struct tg3 *tp)
5381 {
5382         int i;
5383
5384         for (i = tp->irq_cnt - 1; i >= 0; i--)
5385                 napi_disable(&tp->napi[i].napi);
5386 }
5387
5388 static void tg3_napi_enable(struct tg3 *tp)
5389 {
5390         int i;
5391
5392         for (i = 0; i < tp->irq_cnt; i++)
5393                 napi_enable(&tp->napi[i].napi);
5394 }
5395
5396 static void tg3_napi_init(struct tg3 *tp)
5397 {
5398         int i;
5399
5400         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5401         for (i = 1; i < tp->irq_cnt; i++)
5402                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5403 }
5404
5405 static void tg3_napi_fini(struct tg3 *tp)
5406 {
5407         int i;
5408
5409         for (i = 0; i < tp->irq_cnt; i++)
5410                 netif_napi_del(&tp->napi[i].napi);
5411 }
5412
/* Halt network activity: freeze NAPI polling and disable the TX
 * queues.  Does not itself touch chip interrupt state.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5419
/* Resume network activity after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI, mark the status block updated, and re-enable chip
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Force SD_STATUS_UPDATED before re-enabling interrupts —
	 * presumably so pending status-block events are noticed on
	 * the next poll; confirm against tg3_has_work().
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5432
/* Set irq_sync and wait for any interrupt handlers already running on
 * other CPUs to finish.  Handlers test tg3_irq_sync() and refuse to
 * schedule NAPI once the flag is visible; the smp_mb() publishes the
 * flag before the synchronize_irq() waits begin.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nested quiesce is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5445
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* _bh variant: tg3_poll_link() takes tp->lock from NAPI
	 * (softirq) context, so bottom halves must be blocked here.
	 */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5457
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5462
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache with the status block and, if this vector
	 * has an RX return ring, its next descriptor.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5480
5481 /* MSI ISR - No need to check for interrupt sharing and no need to
5482  * flush status block and interrupt mailbox. PCI ordering rules
5483  * guarantee that MSI will arrive after the status block.
5484  */
5485 static irqreturn_t tg3_msi(int irq, void *dev_id)
5486 {
5487         struct tg3_napi *tnapi = dev_id;
5488         struct tg3 *tp = tnapi->tp;
5489
5490         prefetch(tnapi->hw_status);
5491         if (tnapi->rx_rcb)
5492                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5493         /*
5494          * Writing any value to intr-mbox-0 clears PCI INTA# and
5495          * chip-internal interrupt pending events.
5496          * Writing non-zero to intr-mbox-0 additional tells the
5497          * NIC to stop sending us irqs, engaging "in-intr-handler"
5498          * event coalescing.
5499          */
5500         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5501         if (likely(!tg3_irq_sync(tp)))
5502                 napi_schedule(&tnapi->napi);
5503
5504         return IRQ_RETVAL(1);
5505 }
5506
/* INTx (legacy, possibly shared) interrupt handler for chips that do
 * not use tagged status blocks.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not ours (shared line) or chip is resetting. */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Ack the status block update before checking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5555
/* INTx interrupt handler for chips using tagged status blocks: the
 * status tag lets us distinguish new events from an already-handled
 * (or foreign, shared-line) interrupt.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5607
5608 /* ISR for interrupt test */
5609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5610 {
5611         struct tg3_napi *tnapi = dev_id;
5612         struct tg3 *tp = tnapi->tp;
5613         struct tg3_hw_status *sblk = tnapi->hw_status;
5614
5615         if ((sblk->status & SD_STATUS_UPDATED) ||
5616             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5617                 tg3_disable_ints(tp);
5618                 return IRQ_RETVAL(1);
5619         }
5620         return IRQ_RETVAL(0);
5621 }
5622
5623 static int tg3_init_hw(struct tg3 *, int);
5624 static int tg3_halt(struct tg3 *, int, int);
5625
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed: the lock must be
 * dropped around del_timer_sync()/dev_close() and reacquired so the
 * caller still holds tp->lock on return, per the sparse annotations.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		/* Allow dev_close() to run its shutdown path normally. */
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
5649
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every vector by calling the INTx handler
 * directly, as if each irq had fired.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
5660
/* Workqueue handler (scheduled e.g. from tg3_tx_timeout) that halts
 * and fully re-initializes the chip.  Runs in process context.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Device was closed between scheduling and execution. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock while stopping the phy and the tx path, then
	 * retake it with irq_sync=1 to quiesce the interrupt handlers.
	 */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request before the reset. */
	restart_timer = tg3_flag(tp, RESTART_TIMER);
	tg3_flag_clear(tp, RESTART_TIMER);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* tx recovery: switch to the flushing mailbox write
		 * routines and flag that mailbox writes may reorder.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	/* Restart the phy only after a successful re-init. */
	if (!err)
		tg3_phy_start(tp);
}
5708
/* net_device tx watchdog callback: log state (if tx-error messages
 * are enabled) and schedule a full chip reset via tg3_reset_task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	schedule_work(&tp->reset_task);
}
5720
5721 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5722 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5723 {
5724         u32 base = (u32) mapping & 0xffffffff;
5725
5726         return (base > 0xffffdcc0) && (base + len + 8 < base);
5727 }
5728
5729 /* Test for DMA addresses > 40-bit */
5730 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5731                                           int len)
5732 {
5733 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5734         if (tg3_flag(tp, 40BIT_DMA_BUG))
5735                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5736         return 0;
5737 #else
5738         return 0;
5739 #endif
5740 }
5741
5742 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5743                         dma_addr_t mapping, int len, u32 flags,
5744                         u32 mss_and_is_end)
5745 {
5746         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5747         int is_end = (mss_and_is_end & 0x1);
5748         u32 mss = (mss_and_is_end >> 1);
5749         u32 vlan_tag = 0;
5750
5751         if (is_end)
5752                 flags |= TXD_FLAG_END;
5753         if (flags & TXD_FLAG_VLAN) {
5754                 vlan_tag = flags >> 16;
5755                 flags &= 0xffff;
5756         }
5757         vlan_tag |= (mss << TXD_MSS_SHIFT);
5758
5759         txd->addr_hi = ((u64) mapping >> 32);
5760         txd->addr_lo = ((u64) mapping & 0xffffffff);
5761         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5762         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5763 }
5764
5765 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5766                                 struct sk_buff *skb, int last)
5767 {
5768         int i;
5769         u32 entry = tnapi->tx_prod;
5770         struct ring_info *txb = &tnapi->tx_buffers[entry];
5771
5772         pci_unmap_single(tnapi->tp->pdev,
5773                          dma_unmap_addr(txb, mapping),
5774                          skb_headlen(skb),
5775                          PCI_DMA_TODEVICE);
5776         for (i = 0; i <= last; i++) {
5777                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5778
5779                 entry = NEXT_TX(entry);
5780                 txb = &tnapi->tx_buffers[entry];
5781
5782                 pci_unmap_page(tnapi->tp->pdev,
5783                                dma_unmap_addr(txb, mapping),
5784                                frag->size, PCI_DMA_TODEVICE);
5785         }
5786 }
5787
5788 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5789 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5790                                        struct sk_buff *skb,
5791                                        u32 base_flags, u32 mss)
5792 {
5793         struct tg3 *tp = tnapi->tp;
5794         struct sk_buff *new_skb;
5795         dma_addr_t new_addr = 0;
5796         u32 entry = tnapi->tx_prod;
5797         int ret = 0;
5798
5799         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5800                 new_skb = skb_copy(skb, GFP_ATOMIC);
5801         else {
5802                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5803
5804                 new_skb = skb_copy_expand(skb,
5805                                           skb_headroom(skb) + more_headroom,
5806                                           skb_tailroom(skb), GFP_ATOMIC);
5807         }
5808
5809         if (!new_skb) {
5810                 ret = -1;
5811         } else {
5812                 /* New SKB is guaranteed to be linear. */
5813                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5814                                           PCI_DMA_TODEVICE);
5815                 /* Make sure the mapping succeeded */
5816                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5817                         ret = -1;
5818                         dev_kfree_skb(new_skb);
5819
5820                 /* Make sure new skb does not cross any 4G boundaries.
5821                  * Drop the packet if it does.
5822                  */
5823                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5824                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5825                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5826                                          PCI_DMA_TODEVICE);
5827                         ret = -1;
5828                         dev_kfree_skb(new_skb);
5829                 } else {
5830