tg3: Create funcs for power source switching
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
111
112 /* length of time before we decide the hardware is borked,
113  * and dev->tx_timeout() should be called to fix the problem
114  */
115
116 #define TG3_TX_TIMEOUT                  (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU                     60
120 #define TG3_MAX_MTU(tp) \
121         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124  * You can't change the ring sizes, but you can change where you place
125  * them in the NIC onboard memory.
126  */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING         200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
135 #define TG3_RSS_INDIR_TBL_SIZE          128
136
137 /* Do not place this n-ring entries value into the tp struct itself,
138  * we really want to expose these constants to GCC so that modulo et
139  * al.  operations are done with shifts and masks instead of with
140  * hw multiply/modulo instructions.  Another solution would be to
141  * replace things like '% foo' with '& (foo - 1)'.
142  */
143
144 #define TG3_TX_RING_SIZE                512
145 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
154                                  TG3_TX_RING_SIZE)
155 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156
157 #define TG3_DMA_BYTE_ENAB               64
158
159 #define TG3_RX_STD_DMA_SZ               1536
160 #define TG3_RX_JMB_DMA_SZ               9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174  * that are at least dword aligned when used in PCIX mode.  The driver
175  * works around this bug by double copying the packet.  This workaround
176  * is built into the normal double copy length check for efficiency.
177  *
178  * However, the double copy is only necessary on those architectures
179  * where unaligned memory accesses are inefficient.  For those architectures
180  * where unaligned memory accesses incur little penalty, we can reintegrate
181  * the 5701 in the normal rx path.  Doing so saves a device structure
182  * dereference by hardcoding the double copy threshold in place.
183  */
184 #define TG3_RX_COPY_THRESHOLD           256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
187 #else
188         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
189 #endif
190
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
193
194 #define TG3_RAW_IP_ALIGN 2
195
196 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
197
198 #define FIRMWARE_TG3            "tigon/tg3.bin"
199 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
200 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
201
202 static char version[] __devinitdata =
203         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
204
205 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
206 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
207 MODULE_LICENSE("GPL");
208 MODULE_VERSION(DRV_MODULE_VERSION);
209 MODULE_FIRMWARE(FIRMWARE_TG3);
210 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
211 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
212
213 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
214 module_param(tg3_debug, int, 0);
215 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
217 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
291         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
292         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
295         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
296         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
297         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
299         {}
300 };
301
302 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
304 static const struct {
305         const char string[ETH_GSTRING_LEN];
306 } ethtool_stats_keys[] = {
307         { "rx_octets" },
308         { "rx_fragments" },
309         { "rx_ucast_packets" },
310         { "rx_mcast_packets" },
311         { "rx_bcast_packets" },
312         { "rx_fcs_errors" },
313         { "rx_align_errors" },
314         { "rx_xon_pause_rcvd" },
315         { "rx_xoff_pause_rcvd" },
316         { "rx_mac_ctrl_rcvd" },
317         { "rx_xoff_entered" },
318         { "rx_frame_too_long_errors" },
319         { "rx_jabbers" },
320         { "rx_undersize_packets" },
321         { "rx_in_length_errors" },
322         { "rx_out_length_errors" },
323         { "rx_64_or_less_octet_packets" },
324         { "rx_65_to_127_octet_packets" },
325         { "rx_128_to_255_octet_packets" },
326         { "rx_256_to_511_octet_packets" },
327         { "rx_512_to_1023_octet_packets" },
328         { "rx_1024_to_1522_octet_packets" },
329         { "rx_1523_to_2047_octet_packets" },
330         { "rx_2048_to_4095_octet_packets" },
331         { "rx_4096_to_8191_octet_packets" },
332         { "rx_8192_to_9022_octet_packets" },
333
334         { "tx_octets" },
335         { "tx_collisions" },
336
337         { "tx_xon_sent" },
338         { "tx_xoff_sent" },
339         { "tx_flow_control" },
340         { "tx_mac_errors" },
341         { "tx_single_collisions" },
342         { "tx_mult_collisions" },
343         { "tx_deferred" },
344         { "tx_excessive_collisions" },
345         { "tx_late_collisions" },
346         { "tx_collide_2times" },
347         { "tx_collide_3times" },
348         { "tx_collide_4times" },
349         { "tx_collide_5times" },
350         { "tx_collide_6times" },
351         { "tx_collide_7times" },
352         { "tx_collide_8times" },
353         { "tx_collide_9times" },
354         { "tx_collide_10times" },
355         { "tx_collide_11times" },
356         { "tx_collide_12times" },
357         { "tx_collide_13times" },
358         { "tx_collide_14times" },
359         { "tx_collide_15times" },
360         { "tx_ucast_packets" },
361         { "tx_mcast_packets" },
362         { "tx_bcast_packets" },
363         { "tx_carrier_sense_errors" },
364         { "tx_discards" },
365         { "tx_errors" },
366
367         { "dma_writeq_full" },
368         { "dma_write_prioq_full" },
369         { "rxbds_empty" },
370         { "rx_discards" },
371         { "rx_errors" },
372         { "rx_threshold_hit" },
373
374         { "dma_readq_full" },
375         { "dma_read_prioq_full" },
376         { "tx_comp_queue_full" },
377
378         { "ring_set_send_prod_index" },
379         { "ring_status_update" },
380         { "nic_irqs" },
381         { "nic_avoided_irqs" },
382         { "nic_tx_threshold_hit" },
383
384         { "mbuf_lwm_thresh_hit" },
385 };
386
387 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
388
389
390 static const struct {
391         const char string[ETH_GSTRING_LEN];
392 } ethtool_test_keys[] = {
393         { "nvram test     (online) " },
394         { "link test      (online) " },
395         { "register test  (offline)" },
396         { "memory test    (offline)" },
397         { "loopback test  (offline)" },
398         { "interrupt test (offline)" },
399 };
400
401 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
402
403
404 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
405 {
406         writel(val, tp->regs + off);
407 }
408
409 static u32 tg3_read32(struct tg3 *tp, u32 off)
410 {
411         return readl(tp->regs + off);
412 }
413
414 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
415 {
416         writel(val, tp->aperegs + off);
417 }
418
419 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
420 {
421         return readl(tp->aperegs + off);
422 }
423
424 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
425 {
426         unsigned long flags;
427
428         spin_lock_irqsave(&tp->indirect_lock, flags);
429         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
430         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
431         spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 }
433
434 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
435 {
436         writel(val, tp->regs + off);
437         readl(tp->regs + off);
438 }
439
440 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
441 {
442         unsigned long flags;
443         u32 val;
444
445         spin_lock_irqsave(&tp->indirect_lock, flags);
446         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
448         spin_unlock_irqrestore(&tp->indirect_lock, flags);
449         return val;
450 }
451
452 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
453 {
454         unsigned long flags;
455
456         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
457                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
458                                        TG3_64BIT_REG_LOW, val);
459                 return;
460         }
461         if (off == TG3_RX_STD_PROD_IDX_REG) {
462                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
463                                        TG3_64BIT_REG_LOW, val);
464                 return;
465         }
466
467         spin_lock_irqsave(&tp->indirect_lock, flags);
468         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
469         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
470         spin_unlock_irqrestore(&tp->indirect_lock, flags);
471
472         /* In indirect mode when disabling interrupts, we also need
473          * to clear the interrupt bit in the GRC local ctrl register.
474          */
475         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
476             (val == 0x1)) {
477                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
478                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
479         }
480 }
481
482 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
483 {
484         unsigned long flags;
485         u32 val;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
489         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
490         spin_unlock_irqrestore(&tp->indirect_lock, flags);
491         return val;
492 }
493
494 /* usec_wait specifies the wait time in usec when writing to certain registers
495  * where it is unsafe to read back the register without some delay.
496  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
497  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
498  */
499 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
500 {
501         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
502                 /* Non-posted methods */
503                 tp->write32(tp, off, val);
504         else {
505                 /* Posted method */
506                 tg3_write32(tp, off, val);
507                 if (usec_wait)
508                         udelay(usec_wait);
509                 tp->read32(tp, off);
510         }
511         /* Wait again after the read for the posted method to guarantee that
512          * the wait time is met.
513          */
514         if (usec_wait)
515                 udelay(usec_wait);
516 }
517
518 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
519 {
520         tp->write32_mbox(tp, off, val);
521         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
522                 tp->read32_mbox(tp, off);
523 }
524
525 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
526 {
527         void __iomem *mbox = tp->regs + off;
528         writel(val, mbox);
529         if (tg3_flag(tp, TXD_MBOX_HWBUG))
530                 writel(val, mbox);
531         if (tg3_flag(tp, MBOX_WRITE_REORDER))
532                 readl(mbox);
533 }
534
535 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
536 {
537         return readl(tp->regs + off + GRCMBOX_BASE);
538 }
539
540 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
541 {
542         writel(val, tp->regs + off + GRCMBOX_BASE);
543 }
544
545 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
546 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
547 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
548 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
549 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
550
551 #define tw32(reg, val)                  tp->write32(tp, reg, val)
552 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
553 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
554 #define tr32(reg)                       tp->read32(tp, reg)
555
556 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
557 {
558         unsigned long flags;
559
560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
561             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
562                 return;
563
564         spin_lock_irqsave(&tp->indirect_lock, flags);
565         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
566                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
567                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
568
569                 /* Always leave this as zero. */
570                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
571         } else {
572                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
573                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
574
575                 /* Always leave this as zero. */
576                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
577         }
578         spin_unlock_irqrestore(&tp->indirect_lock, flags);
579 }
580
581 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
582 {
583         unsigned long flags;
584
585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
586             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
587                 *val = 0;
588                 return;
589         }
590
591         spin_lock_irqsave(&tp->indirect_lock, flags);
592         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
593                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
594                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
595
596                 /* Always leave this as zero. */
597                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
598         } else {
599                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
600                 *val = tr32(TG3PCI_MEM_WIN_DATA);
601
602                 /* Always leave this as zero. */
603                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
604         }
605         spin_unlock_irqrestore(&tp->indirect_lock, flags);
606 }
607
608 static void tg3_ape_lock_init(struct tg3 *tp)
609 {
610         int i;
611         u32 regbase;
612
613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
614                 regbase = TG3_APE_LOCK_GRANT;
615         else
616                 regbase = TG3_APE_PER_LOCK_GRANT;
617
618         /* Make sure the driver hasn't any stale locks. */
619         for (i = 0; i < 8; i++)
620                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
621 }
622
623 static int tg3_ape_lock(struct tg3 *tp, int locknum)
624 {
625         int i, off;
626         int ret = 0;
627         u32 status, req, gnt;
628
629         if (!tg3_flag(tp, ENABLE_APE))
630                 return 0;
631
632         switch (locknum) {
633         case TG3_APE_LOCK_GRC:
634         case TG3_APE_LOCK_MEM:
635                 break;
636         default:
637                 return -EINVAL;
638         }
639
640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
641                 req = TG3_APE_LOCK_REQ;
642                 gnt = TG3_APE_LOCK_GRANT;
643         } else {
644                 req = TG3_APE_PER_LOCK_REQ;
645                 gnt = TG3_APE_PER_LOCK_GRANT;
646         }
647
648         off = 4 * locknum;
649
650         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
651
652         /* Wait for up to 1 millisecond to acquire lock. */
653         for (i = 0; i < 100; i++) {
654                 status = tg3_ape_read32(tp, gnt + off);
655                 if (status == APE_LOCK_GRANT_DRIVER)
656                         break;
657                 udelay(10);
658         }
659
660         if (status != APE_LOCK_GRANT_DRIVER) {
661                 /* Revoke the lock request. */
662                 tg3_ape_write32(tp, gnt + off,
663                                 APE_LOCK_GRANT_DRIVER);
664
665                 ret = -EBUSY;
666         }
667
668         return ret;
669 }
670
671 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
672 {
673         u32 gnt;
674
675         if (!tg3_flag(tp, ENABLE_APE))
676                 return;
677
678         switch (locknum) {
679         case TG3_APE_LOCK_GRC:
680         case TG3_APE_LOCK_MEM:
681                 break;
682         default:
683                 return;
684         }
685
686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
687                 gnt = TG3_APE_LOCK_GRANT;
688         else
689                 gnt = TG3_APE_PER_LOCK_GRANT;
690
691         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
692 }
693
694 static void tg3_disable_ints(struct tg3 *tp)
695 {
696         int i;
697
698         tw32(TG3PCI_MISC_HOST_CTRL,
699              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
700         for (i = 0; i < tp->irq_max; i++)
701                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
702 }
703
704 static void tg3_enable_ints(struct tg3 *tp)
705 {
706         int i;
707
708         tp->irq_sync = 0;
709         wmb();
710
711         tw32(TG3PCI_MISC_HOST_CTRL,
712              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
713
714         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
715         for (i = 0; i < tp->irq_cnt; i++) {
716                 struct tg3_napi *tnapi = &tp->napi[i];
717
718                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
719                 if (tg3_flag(tp, 1SHOT_MSI))
720                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
721
722                 tp->coal_now |= tnapi->coal_now;
723         }
724
725         /* Force an initial interrupt */
726         if (!tg3_flag(tp, TAGGED_STATUS) &&
727             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
728                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
729         else
730                 tw32(HOSTCC_MODE, tp->coal_now);
731
732         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
733 }
734
735 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
736 {
737         struct tg3 *tp = tnapi->tp;
738         struct tg3_hw_status *sblk = tnapi->hw_status;
739         unsigned int work_exists = 0;
740
741         /* check for phy events */
742         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
743                 if (sblk->status & SD_STATUS_LINK_CHG)
744                         work_exists = 1;
745         }
746         /* check for RX/TX work to do */
747         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
748             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
749                 work_exists = 1;
750
751         return work_exists;
752 }
753
754 /* tg3_int_reenable
755  *  similar to tg3_enable_ints, but it accurately determines whether there
756  *  is new work pending and can return without flushing the PIO write
757  *  which reenables interrupts
758  */
759 static void tg3_int_reenable(struct tg3_napi *tnapi)
760 {
761         struct tg3 *tp = tnapi->tp;
762
763         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
764         mmiowb();
765
766         /* When doing tagged status, this work check is unnecessary.
767          * The last_tag we write above tells the chip which piece of
768          * work we've completed.
769          */
770         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
771                 tw32(HOSTCC_MODE, tp->coalesce_mode |
772                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
773 }
774
775 static void tg3_switch_clocks(struct tg3 *tp)
776 {
777         u32 clock_ctrl;
778         u32 orig_clock_ctrl;
779
780         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
781                 return;
782
783         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
784
785         orig_clock_ctrl = clock_ctrl;
786         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
787                        CLOCK_CTRL_CLKRUN_OENABLE |
788                        0x1f);
789         tp->pci_clock_ctrl = clock_ctrl;
790
791         if (tg3_flag(tp, 5705_PLUS)) {
792                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
793                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
794                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
795                 }
796         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
797                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
798                             clock_ctrl |
799                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
800                             40);
801                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
802                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
803                             40);
804         }
805         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
806 }
807
808 #define PHY_BUSY_LOOPS  5000
809
810 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
811 {
812         u32 frame_val;
813         unsigned int loops;
814         int ret;
815
816         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
817                 tw32_f(MAC_MI_MODE,
818                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
819                 udelay(80);
820         }
821
822         *val = 0x0;
823
824         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
825                       MI_COM_PHY_ADDR_MASK);
826         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
827                       MI_COM_REG_ADDR_MASK);
828         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
829
830         tw32_f(MAC_MI_COM, frame_val);
831
832         loops = PHY_BUSY_LOOPS;
833         while (loops != 0) {
834                 udelay(10);
835                 frame_val = tr32(MAC_MI_COM);
836
837                 if ((frame_val & MI_COM_BUSY) == 0) {
838                         udelay(5);
839                         frame_val = tr32(MAC_MI_COM);
840                         break;
841                 }
842                 loops -= 1;
843         }
844
845         ret = -EBUSY;
846         if (loops != 0) {
847                 *val = frame_val & MI_COM_DATA_MASK;
848                 ret = 0;
849         }
850
851         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
852                 tw32_f(MAC_MI_MODE, tp->mi_mode);
853                 udelay(80);
854         }
855
856         return ret;
857 }
858
859 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
860 {
861         u32 frame_val;
862         unsigned int loops;
863         int ret;
864
865         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
866             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
867                 return 0;
868
869         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
870                 tw32_f(MAC_MI_MODE,
871                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
872                 udelay(80);
873         }
874
875         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
876                       MI_COM_PHY_ADDR_MASK);
877         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
878                       MI_COM_REG_ADDR_MASK);
879         frame_val |= (val & MI_COM_DATA_MASK);
880         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
881
882         tw32_f(MAC_MI_COM, frame_val);
883
884         loops = PHY_BUSY_LOOPS;
885         while (loops != 0) {
886                 udelay(10);
887                 frame_val = tr32(MAC_MI_COM);
888                 if ((frame_val & MI_COM_BUSY) == 0) {
889                         udelay(5);
890                         frame_val = tr32(MAC_MI_COM);
891                         break;
892                 }
893                 loops -= 1;
894         }
895
896         ret = -EBUSY;
897         if (loops != 0)
898                 ret = 0;
899
900         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
901                 tw32_f(MAC_MI_MODE, tp->mi_mode);
902                 udelay(80);
903         }
904
905         return ret;
906 }
907
908 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
909 {
910         int err;
911
912         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
913         if (err)
914                 goto done;
915
916         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
917         if (err)
918                 goto done;
919
920         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
921                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
922         if (err)
923                 goto done;
924
925         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
926
927 done:
928         return err;
929 }
930
931 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
932 {
933         int err;
934
935         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
936         if (err)
937                 goto done;
938
939         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
940         if (err)
941                 goto done;
942
943         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
944                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
945         if (err)
946                 goto done;
947
948         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
949
950 done:
951         return err;
952 }
953
954 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
955 {
956         int err;
957
958         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
959         if (!err)
960                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
961
962         return err;
963 }
964
965 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
966 {
967         int err;
968
969         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
970         if (!err)
971                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
972
973         return err;
974 }
975
976 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
977 {
978         int err;
979
980         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
981                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
982                            MII_TG3_AUXCTL_SHDWSEL_MISC);
983         if (!err)
984                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
985
986         return err;
987 }
988
989 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
990 {
991         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
992                 set |= MII_TG3_AUXCTL_MISC_WREN;
993
994         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
995 }
996
997 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
998         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
999                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1000                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1001
1002 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1003         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1004                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1005
1006 static int tg3_bmcr_reset(struct tg3 *tp)
1007 {
1008         u32 phy_control;
1009         int limit, err;
1010
1011         /* OK, reset it, and poll the BMCR_RESET bit until it
1012          * clears or we time out.
1013          */
1014         phy_control = BMCR_RESET;
1015         err = tg3_writephy(tp, MII_BMCR, phy_control);
1016         if (err != 0)
1017                 return -EBUSY;
1018
1019         limit = 5000;
1020         while (limit--) {
1021                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1022                 if (err != 0)
1023                         return -EBUSY;
1024
1025                 if ((phy_control & BMCR_RESET) == 0) {
1026                         udelay(40);
1027                         break;
1028                 }
1029                 udelay(10);
1030         }
1031         if (limit < 0)
1032                 return -EBUSY;
1033
1034         return 0;
1035 }
1036
1037 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1038 {
1039         struct tg3 *tp = bp->priv;
1040         u32 val;
1041
1042         spin_lock_bh(&tp->lock);
1043
1044         if (tg3_readphy(tp, reg, &val))
1045                 val = -EIO;
1046
1047         spin_unlock_bh(&tp->lock);
1048
1049         return val;
1050 }
1051
1052 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1053 {
1054         struct tg3 *tp = bp->priv;
1055         u32 ret = 0;
1056
1057         spin_lock_bh(&tp->lock);
1058
1059         if (tg3_writephy(tp, reg, val))
1060                 ret = -EIO;
1061
1062         spin_unlock_bh(&tp->lock);
1063
1064         return ret;
1065 }
1066
1067 static int tg3_mdio_reset(struct mii_bus *bp)
1068 {
1069         return 0;
1070 }
1071
1072 static void tg3_mdio_config_5785(struct tg3 *tp)
1073 {
1074         u32 val;
1075         struct phy_device *phydev;
1076
1077         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1078         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1079         case PHY_ID_BCM50610:
1080         case PHY_ID_BCM50610M:
1081                 val = MAC_PHYCFG2_50610_LED_MODES;
1082                 break;
1083         case PHY_ID_BCMAC131:
1084                 val = MAC_PHYCFG2_AC131_LED_MODES;
1085                 break;
1086         case PHY_ID_RTL8211C:
1087                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1088                 break;
1089         case PHY_ID_RTL8201E:
1090                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1091                 break;
1092         default:
1093                 return;
1094         }
1095
1096         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1097                 tw32(MAC_PHYCFG2, val);
1098
1099                 val = tr32(MAC_PHYCFG1);
1100                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1101                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1102                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1103                 tw32(MAC_PHYCFG1, val);
1104
1105                 return;
1106         }
1107
1108         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1109                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1110                        MAC_PHYCFG2_FMODE_MASK_MASK |
1111                        MAC_PHYCFG2_GMODE_MASK_MASK |
1112                        MAC_PHYCFG2_ACT_MASK_MASK   |
1113                        MAC_PHYCFG2_QUAL_MASK_MASK |
1114                        MAC_PHYCFG2_INBAND_ENABLE;
1115
1116         tw32(MAC_PHYCFG2, val);
1117
1118         val = tr32(MAC_PHYCFG1);
1119         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1120                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1121         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1122                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1123                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1124                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1125                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1126         }
1127         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1128                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1129         tw32(MAC_PHYCFG1, val);
1130
1131         val = tr32(MAC_EXT_RGMII_MODE);
1132         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1133                  MAC_RGMII_MODE_RX_QUALITY |
1134                  MAC_RGMII_MODE_RX_ACTIVITY |
1135                  MAC_RGMII_MODE_RX_ENG_DET |
1136                  MAC_RGMII_MODE_TX_ENABLE |
1137                  MAC_RGMII_MODE_TX_LOWPWR |
1138                  MAC_RGMII_MODE_TX_RESET);
1139         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1140                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1141                         val |= MAC_RGMII_MODE_RX_INT_B |
1142                                MAC_RGMII_MODE_RX_QUALITY |
1143                                MAC_RGMII_MODE_RX_ACTIVITY |
1144                                MAC_RGMII_MODE_RX_ENG_DET;
1145                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1146                         val |= MAC_RGMII_MODE_TX_ENABLE |
1147                                MAC_RGMII_MODE_TX_LOWPWR |
1148                                MAC_RGMII_MODE_TX_RESET;
1149         }
1150         tw32(MAC_EXT_RGMII_MODE, val);
1151 }
1152
1153 static void tg3_mdio_start(struct tg3 *tp)
1154 {
1155         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1156         tw32_f(MAC_MI_MODE, tp->mi_mode);
1157         udelay(80);
1158
1159         if (tg3_flag(tp, MDIOBUS_INITED) &&
1160             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1161                 tg3_mdio_config_5785(tp);
1162 }
1163
1164 static int tg3_mdio_init(struct tg3 *tp)
1165 {
1166         int i;
1167         u32 reg;
1168         struct phy_device *phydev;
1169
1170         if (tg3_flag(tp, 5717_PLUS)) {
1171                 u32 is_serdes;
1172
1173                 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1174
1175                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1176                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1177                 else
1178                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1179                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1180                 if (is_serdes)
1181                         tp->phy_addr += 7;
1182         } else
1183                 tp->phy_addr = TG3_PHY_MII_ADDR;
1184
1185         tg3_mdio_start(tp);
1186
1187         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1188                 return 0;
1189
1190         tp->mdio_bus = mdiobus_alloc();
1191         if (tp->mdio_bus == NULL)
1192                 return -ENOMEM;
1193
1194         tp->mdio_bus->name     = "tg3 mdio bus";
1195         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1196                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1197         tp->mdio_bus->priv     = tp;
1198         tp->mdio_bus->parent   = &tp->pdev->dev;
1199         tp->mdio_bus->read     = &tg3_mdio_read;
1200         tp->mdio_bus->write    = &tg3_mdio_write;
1201         tp->mdio_bus->reset    = &tg3_mdio_reset;
1202         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1203         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1204
1205         for (i = 0; i < PHY_MAX_ADDR; i++)
1206                 tp->mdio_bus->irq[i] = PHY_POLL;
1207
1208         /* The bus registration will look for all the PHYs on the mdio bus.
1209          * Unfortunately, it does not ensure the PHY is powered up before
1210          * accessing the PHY ID registers.  A chip reset is the
1211          * quickest way to bring the device back to an operational state..
1212          */
1213         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1214                 tg3_bmcr_reset(tp);
1215
1216         i = mdiobus_register(tp->mdio_bus);
1217         if (i) {
1218                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1219                 mdiobus_free(tp->mdio_bus);
1220                 return i;
1221         }
1222
1223         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1224
1225         if (!phydev || !phydev->drv) {
1226                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1227                 mdiobus_unregister(tp->mdio_bus);
1228                 mdiobus_free(tp->mdio_bus);
1229                 return -ENODEV;
1230         }
1231
1232         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1233         case PHY_ID_BCM57780:
1234                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1235                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1236                 break;
1237         case PHY_ID_BCM50610:
1238         case PHY_ID_BCM50610M:
1239                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1240                                      PHY_BRCM_RX_REFCLK_UNUSED |
1241                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1242                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1243                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1244                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1245                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1246                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1247                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1248                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1249                 /* fallthru */
1250         case PHY_ID_RTL8211C:
1251                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1252                 break;
1253         case PHY_ID_RTL8201E:
1254         case PHY_ID_BCMAC131:
1255                 phydev->interface = PHY_INTERFACE_MODE_MII;
1256                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1257                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1258                 break;
1259         }
1260
1261         tg3_flag_set(tp, MDIOBUS_INITED);
1262
1263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1264                 tg3_mdio_config_5785(tp);
1265
1266         return 0;
1267 }
1268
1269 static void tg3_mdio_fini(struct tg3 *tp)
1270 {
1271         if (tg3_flag(tp, MDIOBUS_INITED)) {
1272                 tg3_flag_clear(tp, MDIOBUS_INITED);
1273                 mdiobus_unregister(tp->mdio_bus);
1274                 mdiobus_free(tp->mdio_bus);
1275         }
1276 }
1277
1278 /* tp->lock is held. */
1279 static inline void tg3_generate_fw_event(struct tg3 *tp)
1280 {
1281         u32 val;
1282
1283         val = tr32(GRC_RX_CPU_EVENT);
1284         val |= GRC_RX_CPU_DRIVER_EVENT;
1285         tw32_f(GRC_RX_CPU_EVENT, val);
1286
1287         tp->last_event_jiffies = jiffies;
1288 }
1289
1290 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1291
1292 /* tp->lock is held. */
1293 static void tg3_wait_for_event_ack(struct tg3 *tp)
1294 {
1295         int i;
1296         unsigned int delay_cnt;
1297         long time_remain;
1298
1299         /* If enough time has passed, no wait is necessary. */
1300         time_remain = (long)(tp->last_event_jiffies + 1 +
1301                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1302                       (long)jiffies;
1303         if (time_remain < 0)
1304                 return;
1305
1306         /* Check if we can shorten the wait time. */
1307         delay_cnt = jiffies_to_usecs(time_remain);
1308         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1309                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1310         delay_cnt = (delay_cnt >> 3) + 1;
1311
1312         for (i = 0; i < delay_cnt; i++) {
1313                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1314                         break;
1315                 udelay(8);
1316         }
1317 }
1318
1319 /* tp->lock is held. */
1320 static void tg3_ump_link_report(struct tg3 *tp)
1321 {
1322         u32 reg;
1323         u32 val;
1324
1325         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1326                 return;
1327
1328         tg3_wait_for_event_ack(tp);
1329
1330         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1331
1332         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1333
1334         val = 0;
1335         if (!tg3_readphy(tp, MII_BMCR, &reg))
1336                 val = reg << 16;
1337         if (!tg3_readphy(tp, MII_BMSR, &reg))
1338                 val |= (reg & 0xffff);
1339         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1340
1341         val = 0;
1342         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1343                 val = reg << 16;
1344         if (!tg3_readphy(tp, MII_LPA, &reg))
1345                 val |= (reg & 0xffff);
1346         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1347
1348         val = 0;
1349         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1350                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1351                         val = reg << 16;
1352                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1353                         val |= (reg & 0xffff);
1354         }
1355         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1356
1357         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1358                 val = reg << 16;
1359         else
1360                 val = 0;
1361         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1362
1363         tg3_generate_fw_event(tp);
1364 }
1365
1366 static void tg3_link_report(struct tg3 *tp)
1367 {
1368         if (!netif_carrier_ok(tp->dev)) {
1369                 netif_info(tp, link, tp->dev, "Link is down\n");
1370                 tg3_ump_link_report(tp);
1371         } else if (netif_msg_link(tp)) {
1372                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1373                             (tp->link_config.active_speed == SPEED_1000 ?
1374                              1000 :
1375                              (tp->link_config.active_speed == SPEED_100 ?
1376                               100 : 10)),
1377                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1378                              "full" : "half"));
1379
1380                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1381                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1382                             "on" : "off",
1383                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1384                             "on" : "off");
1385
1386                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1387                         netdev_info(tp->dev, "EEE is %s\n",
1388                                     tp->setlpicnt ? "enabled" : "disabled");
1389
1390                 tg3_ump_link_report(tp);
1391         }
1392 }
1393
1394 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1395 {
1396         u16 miireg;
1397
1398         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1399                 miireg = ADVERTISE_PAUSE_CAP;
1400         else if (flow_ctrl & FLOW_CTRL_TX)
1401                 miireg = ADVERTISE_PAUSE_ASYM;
1402         else if (flow_ctrl & FLOW_CTRL_RX)
1403                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1404         else
1405                 miireg = 0;
1406
1407         return miireg;
1408 }
1409
1410 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1411 {
1412         u16 miireg;
1413
1414         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1415                 miireg = ADVERTISE_1000XPAUSE;
1416         else if (flow_ctrl & FLOW_CTRL_TX)
1417                 miireg = ADVERTISE_1000XPSE_ASYM;
1418         else if (flow_ctrl & FLOW_CTRL_RX)
1419                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1420         else
1421                 miireg = 0;
1422
1423         return miireg;
1424 }
1425
1426 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1427 {
1428         u8 cap = 0;
1429
1430         if (lcladv & ADVERTISE_1000XPAUSE) {
1431                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1435                                 cap = FLOW_CTRL_RX;
1436                 } else {
1437                         if (rmtadv & LPA_1000XPAUSE)
1438                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1439                 }
1440         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1441                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1442                         cap = FLOW_CTRL_TX;
1443         }
1444
1445         return cap;
1446 }
1447
1448 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1449 {
1450         u8 autoneg;
1451         u8 flowctrl = 0;
1452         u32 old_rx_mode = tp->rx_mode;
1453         u32 old_tx_mode = tp->tx_mode;
1454
1455         if (tg3_flag(tp, USE_PHYLIB))
1456                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1457         else
1458                 autoneg = tp->link_config.autoneg;
1459
1460         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1461                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1462                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1463                 else
1464                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1465         } else
1466                 flowctrl = tp->link_config.flowctrl;
1467
1468         tp->link_config.active_flowctrl = flowctrl;
1469
1470         if (flowctrl & FLOW_CTRL_RX)
1471                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_rx_mode != tp->rx_mode)
1476                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1477
1478         if (flowctrl & FLOW_CTRL_TX)
1479                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1480         else
1481                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1482
1483         if (old_tx_mode != tp->tx_mode)
1484                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1485 }
1486
1487 static void tg3_adjust_link(struct net_device *dev)
1488 {
1489         u8 oldflowctrl, linkmesg = 0;
1490         u32 mac_mode, lcl_adv, rmt_adv;
1491         struct tg3 *tp = netdev_priv(dev);
1492         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1493
1494         spin_lock_bh(&tp->lock);
1495
1496         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1497                                     MAC_MODE_HALF_DUPLEX);
1498
1499         oldflowctrl = tp->link_config.active_flowctrl;
1500
1501         if (phydev->link) {
1502                 lcl_adv = 0;
1503                 rmt_adv = 0;
1504
1505                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507                 else if (phydev->speed == SPEED_1000 ||
1508                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1509                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1510                 else
1511                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1512
1513                 if (phydev->duplex == DUPLEX_HALF)
1514                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1515                 else {
1516                         lcl_adv = tg3_advert_flowctrl_1000T(
1517                                   tp->link_config.flowctrl);
1518
1519                         if (phydev->pause)
1520                                 rmt_adv = LPA_PAUSE_CAP;
1521                         if (phydev->asym_pause)
1522                                 rmt_adv |= LPA_PAUSE_ASYM;
1523                 }
1524
1525                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1526         } else
1527                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1528
1529         if (mac_mode != tp->mac_mode) {
1530                 tp->mac_mode = mac_mode;
1531                 tw32_f(MAC_MODE, tp->mac_mode);
1532                 udelay(40);
1533         }
1534
1535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1536                 if (phydev->speed == SPEED_10)
1537                         tw32(MAC_MI_STAT,
1538                              MAC_MI_STAT_10MBPS_MODE |
1539                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540                 else
1541                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542         }
1543
1544         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549         else
1550                 tw32(MAC_TX_LENGTHS,
1551                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1552                       (6 << TX_LENGTHS_IPG_SHIFT) |
1553                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1554
1555         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1556             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1557             phydev->speed != tp->link_config.active_speed ||
1558             phydev->duplex != tp->link_config.active_duplex ||
1559             oldflowctrl != tp->link_config.active_flowctrl)
1560                 linkmesg = 1;
1561
1562         tp->link_config.active_speed = phydev->speed;
1563         tp->link_config.active_duplex = phydev->duplex;
1564
1565         spin_unlock_bh(&tp->lock);
1566
1567         if (linkmesg)
1568                 tg3_link_report(tp);
1569 }
1570
1571 static int tg3_phy_init(struct tg3 *tp)
1572 {
1573         struct phy_device *phydev;
1574
1575         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1576                 return 0;
1577
1578         /* Bring the PHY back to a known state. */
1579         tg3_bmcr_reset(tp);
1580
1581         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1582
1583         /* Attach the MAC to the PHY. */
1584         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1585                              phydev->dev_flags, phydev->interface);
1586         if (IS_ERR(phydev)) {
1587                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1588                 return PTR_ERR(phydev);
1589         }
1590
1591         /* Mask with MAC supported features. */
1592         switch (phydev->interface) {
1593         case PHY_INTERFACE_MODE_GMII:
1594         case PHY_INTERFACE_MODE_RGMII:
1595                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1596                         phydev->supported &= (PHY_GBIT_FEATURES |
1597                                               SUPPORTED_Pause |
1598                                               SUPPORTED_Asym_Pause);
1599                         break;
1600                 }
1601                 /* fallthru */
1602         case PHY_INTERFACE_MODE_MII:
1603                 phydev->supported &= (PHY_BASIC_FEATURES |
1604                                       SUPPORTED_Pause |
1605                                       SUPPORTED_Asym_Pause);
1606                 break;
1607         default:
1608                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1609                 return -EINVAL;
1610         }
1611
1612         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1613
1614         phydev->advertising = phydev->supported;
1615
1616         return 0;
1617 }
1618
1619 static void tg3_phy_start(struct tg3 *tp)
1620 {
1621         struct phy_device *phydev;
1622
1623         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1624                 return;
1625
1626         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1627
1628         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1629                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1630                 phydev->speed = tp->link_config.orig_speed;
1631                 phydev->duplex = tp->link_config.orig_duplex;
1632                 phydev->autoneg = tp->link_config.orig_autoneg;
1633                 phydev->advertising = tp->link_config.orig_advertising;
1634         }
1635
1636         phy_start(phydev);
1637
1638         phy_start_aneg(phydev);
1639 }
1640
1641 static void tg3_phy_stop(struct tg3 *tp)
1642 {
1643         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1644                 return;
1645
1646         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1647 }
1648
1649 static void tg3_phy_fini(struct tg3 *tp)
1650 {
1651         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1652                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1653                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1654         }
1655 }
1656
1657 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1658 {
1659         u32 phytest;
1660
1661         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1662                 u32 phy;
1663
1664                 tg3_writephy(tp, MII_TG3_FET_TEST,
1665                              phytest | MII_TG3_FET_SHADOW_EN);
1666                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1667                         if (enable)
1668                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         else
1670                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1672                 }
1673                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1674         }
1675 }
1676
1677 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1678 {
1679         u32 reg;
1680
1681         if (!tg3_flag(tp, 5705_PLUS) ||
1682             (tg3_flag(tp, 5717_PLUS) &&
1683              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1684                 return;
1685
1686         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1687                 tg3_phy_fet_toggle_apd(tp, enable);
1688                 return;
1689         }
1690
1691         reg = MII_TG3_MISC_SHDW_WREN |
1692               MII_TG3_MISC_SHDW_SCR5_SEL |
1693               MII_TG3_MISC_SHDW_SCR5_LPED |
1694               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1695               MII_TG3_MISC_SHDW_SCR5_SDTL |
1696               MII_TG3_MISC_SHDW_SCR5_C125OE;
1697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1698                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1699
1700         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1701
1702
1703         reg = MII_TG3_MISC_SHDW_WREN |
1704               MII_TG3_MISC_SHDW_APD_SEL |
1705               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1706         if (enable)
1707                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1708
1709         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1710 }
1711
1712 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1713 {
1714         u32 phy;
1715
1716         if (!tg3_flag(tp, 5705_PLUS) ||
1717             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1718                 return;
1719
1720         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1721                 u32 ephy;
1722
1723                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1724                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1725
1726                         tg3_writephy(tp, MII_TG3_FET_TEST,
1727                                      ephy | MII_TG3_FET_SHADOW_EN);
1728                         if (!tg3_readphy(tp, reg, &phy)) {
1729                                 if (enable)
1730                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 else
1732                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1733                                 tg3_writephy(tp, reg, phy);
1734                         }
1735                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1736                 }
1737         } else {
1738                 int ret;
1739
1740                 ret = tg3_phy_auxctl_read(tp,
1741                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1742                 if (!ret) {
1743                         if (enable)
1744                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         else
1746                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1747                         tg3_phy_auxctl_write(tp,
1748                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1749                 }
1750         }
1751 }
1752
1753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1754 {
1755         int ret;
1756         u32 val;
1757
1758         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1759                 return;
1760
1761         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1762         if (!ret)
1763                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1764                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1765 }
1766
1767 static void tg3_phy_apply_otp(struct tg3 *tp)
1768 {
1769         u32 otp, phy;
1770
1771         if (!tp->phy_otp)
1772                 return;
1773
1774         otp = tp->phy_otp;
1775
1776         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1777                 return;
1778
1779         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1780         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1781         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1782
1783         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1784               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1785         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1786
1787         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1788         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1789         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1790
1791         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1793
1794         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1795         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1796
1797         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1798               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1799         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1800
1801         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1802 }
1803
1804 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1805 {
1806         u32 val;
1807
1808         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1809                 return;
1810
1811         tp->setlpicnt = 0;
1812
1813         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1814             current_link_up == 1 &&
1815             tp->link_config.active_duplex == DUPLEX_FULL &&
1816             (tp->link_config.active_speed == SPEED_100 ||
1817              tp->link_config.active_speed == SPEED_1000)) {
1818                 u32 eeectl;
1819
1820                 if (tp->link_config.active_speed == SPEED_1000)
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1822                 else
1823                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1824
1825                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1826
1827                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1828                                   TG3_CL45_D7_EEERES_STAT, &val);
1829
1830                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1831                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1832                         tp->setlpicnt = 2;
1833         }
1834
1835         if (!tp->setlpicnt) {
1836                 val = tr32(TG3_CPMU_EEE_MODE);
1837                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1838         }
1839 }
1840
1841 static void tg3_phy_eee_enable(struct tg3 *tp)
1842 {
1843         u32 val;
1844
1845         if (tp->link_config.active_speed == SPEED_1000 &&
1846             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1847              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1848              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1849             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1850                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1851                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1852         }
1853
1854         val = tr32(TG3_CPMU_EEE_MODE);
1855         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1856 }
1857
1858 static int tg3_wait_macro_done(struct tg3 *tp)
1859 {
1860         int limit = 100;
1861
1862         while (limit--) {
1863                 u32 tmp32;
1864
1865                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1866                         if ((tmp32 & 0x1000) == 0)
1867                                 break;
1868                 }
1869         }
1870         if (limit < 0)
1871                 return -EBUSY;
1872
1873         return 0;
1874 }
1875
1876 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1877 {
1878         static const u32 test_pat[4][6] = {
1879         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1880         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1881         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1882         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1883         };
1884         int chan;
1885
1886         for (chan = 0; chan < 4; chan++) {
1887                 int i;
1888
1889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1890                              (chan * 0x2000) | 0x0200);
1891                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1892
1893                 for (i = 0; i < 6; i++)
1894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1895                                      test_pat[chan][i]);
1896
1897                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1898                 if (tg3_wait_macro_done(tp)) {
1899                         *resetp = 1;
1900                         return -EBUSY;
1901                 }
1902
1903                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1904                              (chan * 0x2000) | 0x0200);
1905                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1906                 if (tg3_wait_macro_done(tp)) {
1907                         *resetp = 1;
1908                         return -EBUSY;
1909                 }
1910
1911                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1912                 if (tg3_wait_macro_done(tp)) {
1913                         *resetp = 1;
1914                         return -EBUSY;
1915                 }
1916
1917                 for (i = 0; i < 6; i += 2) {
1918                         u32 low, high;
1919
1920                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1921                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1922                             tg3_wait_macro_done(tp)) {
1923                                 *resetp = 1;
1924                                 return -EBUSY;
1925                         }
1926                         low &= 0x7fff;
1927                         high &= 0x000f;
1928                         if (low != test_pat[chan][i] ||
1929                             high != test_pat[chan][i+1]) {
1930                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1931                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1932                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1933
1934                                 return -EBUSY;
1935                         }
1936                 }
1937         }
1938
1939         return 0;
1940 }
1941
1942 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1943 {
1944         int chan;
1945
1946         for (chan = 0; chan < 4; chan++) {
1947                 int i;
1948
1949                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1950                              (chan * 0x2000) | 0x0200);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1952                 for (i = 0; i < 6; i++)
1953                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1954                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1955                 if (tg3_wait_macro_done(tp))
1956                         return -EBUSY;
1957         }
1958
1959         return 0;
1960 }
1961
1962 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1963 {
1964         u32 reg32, phy9_orig;
1965         int retries, do_phy_reset, err;
1966
1967         retries = 10;
1968         do_phy_reset = 1;
1969         do {
1970                 if (do_phy_reset) {
1971                         err = tg3_bmcr_reset(tp);
1972                         if (err)
1973                                 return err;
1974                         do_phy_reset = 0;
1975                 }
1976
1977                 /* Disable transmitter and interrupt.  */
1978                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1979                         continue;
1980
1981                 reg32 |= 0x3000;
1982                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1983
1984                 /* Set full-duplex, 1000 mbps.  */
1985                 tg3_writephy(tp, MII_BMCR,
1986                              BMCR_FULLDPLX | BMCR_SPEED1000);
1987
1988                 /* Set to master mode.  */
1989                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1990                         continue;
1991
1992                 tg3_writephy(tp, MII_CTRL1000,
1993                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1994
1995                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1996                 if (err)
1997                         return err;
1998
1999                 /* Block the PHY control access.  */
2000                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2001
2002                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2003                 if (!err)
2004                         break;
2005         } while (--retries);
2006
2007         err = tg3_phy_reset_chanpat(tp);
2008         if (err)
2009                 return err;
2010
2011         tg3_phydsp_write(tp, 0x8005, 0x0000);
2012
2013         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2014         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2015
2016         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2017
2018         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2019
2020         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2021                 reg32 &= ~0x3000;
2022                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2023         } else if (!err)
2024                 err = -EBUSY;
2025
2026         return err;
2027 }
2028
2029 /* This will reset the tigon3 PHY if there is no valid
2030  * link unless the FORCE argument is non-zero.
2031  */
2032 static int tg3_phy_reset(struct tg3 *tp)
2033 {
2034         u32 val, cpmuctrl;
2035         int err;
2036
2037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038                 val = tr32(GRC_MISC_CFG);
2039                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040                 udelay(40);
2041         }
2042         err  = tg3_readphy(tp, MII_BMSR, &val);
2043         err |= tg3_readphy(tp, MII_BMSR, &val);
2044         if (err != 0)
2045                 return -EBUSY;
2046
2047         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048                 netif_carrier_off(tp->dev);
2049                 tg3_link_report(tp);
2050         }
2051
2052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055                 err = tg3_phy_reset_5703_4_5(tp);
2056                 if (err)
2057                         return err;
2058                 goto out;
2059         }
2060
2061         cpmuctrl = 0;
2062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066                         tw32(TG3_CPMU_CTRL,
2067                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2068         }
2069
2070         err = tg3_bmcr_reset(tp);
2071         if (err)
2072                 return err;
2073
2074         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2077
2078                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2079         }
2080
2081         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2086                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087                         udelay(40);
2088                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089                 }
2090         }
2091
2092         if (tg3_flag(tp, 5717_PLUS) &&
2093             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094                 return 0;
2095
2096         tg3_phy_apply_otp(tp);
2097
2098         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099                 tg3_phy_toggle_apd(tp, true);
2100         else
2101                 tg3_phy_toggle_apd(tp, false);
2102
2103 out:
2104         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109         }
2110
2111         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2114         }
2115
2116         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2119                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2120                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2122                 }
2123         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128                                 tg3_writephy(tp, MII_TG3_TEST1,
2129                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2130                         } else
2131                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2132
2133                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134                 }
2135         }
2136
2137         /* Set Extended packet length bit (bit 14) on all chips that */
2138         /* support jumbo frames */
2139         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140                 /* Cannot do read-modify-write on 5401 */
2141                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143                 /* Set bit 14 with read-modify-write to preserve other bits */
2144                 err = tg3_phy_auxctl_read(tp,
2145                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146                 if (!err)
2147                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2149         }
2150
2151         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2152          * jumbo frames transmission.
2153          */
2154         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2158         }
2159
2160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161                 /* adjust output voltage */
2162                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2163         }
2164
2165         tg3_phy_toggle_automdix(tp, 1);
2166         tg3_phy_set_wirespeed(tp);
2167         return 0;
2168 }
2169
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2171 {
2172         if (!tg3_flag(tp, IS_NIC))
2173                 return 0;
2174
2175         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2177
2178         return 0;
2179 }
2180
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2182 {
2183         u32 grc_local_ctrl;
2184
2185         if (!tg3_flag(tp, IS_NIC) ||
2186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188                 return;
2189
2190         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2191
2192         tw32_wait_f(GRC_LOCAL_CTRL,
2193                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2195
2196         tw32_wait_f(GRC_LOCAL_CTRL,
2197                     grc_local_ctrl,
2198                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2199
2200         tw32_wait_f(GRC_LOCAL_CTRL,
2201                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2203 }
2204
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2206 {
2207         if (!tg3_flag(tp, IS_NIC))
2208                 return;
2209
2210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213                             (GRC_LCLCTRL_GPIO_OE0 |
2214                              GRC_LCLCTRL_GPIO_OE1 |
2215                              GRC_LCLCTRL_GPIO_OE2 |
2216                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2217                              GRC_LCLCTRL_GPIO_OUTPUT1),
2218                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2219         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223                                      GRC_LCLCTRL_GPIO_OE1 |
2224                                      GRC_LCLCTRL_GPIO_OE2 |
2225                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2226                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2227                                      tp->grc_local_ctrl;
2228                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2230
2231                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2234
2235                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2238         } else {
2239                 u32 no_gpio2;
2240                 u32 grc_local_ctrl = 0;
2241
2242                 /* Workaround to prevent overdrawing Amps. */
2243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246                                     grc_local_ctrl,
2247                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2248                 }
2249
2250                 /* On 5753 and variants, GPIO2 cannot be used. */
2251                 no_gpio2 = tp->nic_sram_data_cfg &
2252                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2253
2254                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255                                   GRC_LCLCTRL_GPIO_OE1 |
2256                                   GRC_LCLCTRL_GPIO_OE2 |
2257                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2258                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2259                 if (no_gpio2) {
2260                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2262                 }
2263                 tw32_wait_f(GRC_LOCAL_CTRL,
2264                             tp->grc_local_ctrl | grc_local_ctrl,
2265                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2266
2267                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2268
2269                 tw32_wait_f(GRC_LOCAL_CTRL,
2270                             tp->grc_local_ctrl | grc_local_ctrl,
2271                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2272
2273                 if (!no_gpio2) {
2274                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275                         tw32_wait_f(GRC_LOCAL_CTRL,
2276                                     tp->grc_local_ctrl | grc_local_ctrl,
2277                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2278                 }
2279         }
2280 }
2281
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2283 {
2284         bool need_vaux = false;
2285
2286         /* The GPIOs do something completely different on 57765. */
2287         if (!tg3_flag(tp, IS_NIC) ||
2288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290                 return;
2291
2292         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296             tp->pdev_peer != tp->pdev) {
2297                 struct net_device *dev_peer;
2298
2299                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2300
2301                 /* remove_one() may have been run on the peer. */
2302                 if (dev_peer) {
2303                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2304
2305                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2306                                 return;
2307
2308                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309                             tg3_flag(tp_peer, ENABLE_ASF))
2310                                 need_vaux = true;
2311                 }
2312         }
2313
2314         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315                 need_vaux = true;
2316
2317         if (need_vaux)
2318                 tg3_pwrsrc_switch_to_vaux(tp);
2319         else
2320                 tg3_pwrsrc_die_with_vmain(tp);
2321 }
2322
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2324 {
2325         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326                 return 1;
2327         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328                 if (speed != SPEED_10)
2329                         return 1;
2330         } else if (speed == SPEED_10)
2331                 return 1;
2332
2333         return 0;
2334 }
2335
2336 static int tg3_setup_phy(struct tg3 *, int);
2337
2338 #define RESET_KIND_SHUTDOWN     0
2339 #define RESET_KIND_INIT         1
2340 #define RESET_KIND_SUSPEND      2
2341
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
2344
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2346 {
2347         u32 val;
2348
2349         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2353
2354                         sg_dig_ctrl |=
2355                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2358                 }
2359                 return;
2360         }
2361
2362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363                 tg3_bmcr_reset(tp);
2364                 val = tr32(GRC_MISC_CFG);
2365                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366                 udelay(40);
2367                 return;
2368         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369                 u32 phytest;
2370                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371                         u32 phy;
2372
2373                         tg3_writephy(tp, MII_ADVERTISE, 0);
2374                         tg3_writephy(tp, MII_BMCR,
2375                                      BMCR_ANENABLE | BMCR_ANRESTART);
2376
2377                         tg3_writephy(tp, MII_TG3_FET_TEST,
2378                                      phytest | MII_TG3_FET_SHADOW_EN);
2379                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381                                 tg3_writephy(tp,
2382                                              MII_TG3_FET_SHDW_AUXMODE4,
2383                                              phy);
2384                         }
2385                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2386                 }
2387                 return;
2388         } else if (do_low_power) {
2389                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2391
2392                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2395                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2396         }
2397
2398         /* The PHY should not be powered down on some chips because
2399          * of bugs.
2400          */
2401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405                 return;
2406
2407         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2413         }
2414
2415         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2416 }
2417
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, NVRAM)) {
2422                 int i;
2423
2424                 if (tp->nvram_lock_cnt == 0) {
2425                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426                         for (i = 0; i < 8000; i++) {
2427                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428                                         break;
2429                                 udelay(20);
2430                         }
2431                         if (i == 8000) {
2432                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433                                 return -ENODEV;
2434                         }
2435                 }
2436                 tp->nvram_lock_cnt++;
2437         }
2438         return 0;
2439 }
2440
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2443 {
2444         if (tg3_flag(tp, NVRAM)) {
2445                 if (tp->nvram_lock_cnt > 0)
2446                         tp->nvram_lock_cnt--;
2447                 if (tp->nvram_lock_cnt == 0)
2448                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2449         }
2450 }
2451
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2454 {
2455         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456                 u32 nvaccess = tr32(NVRAM_ACCESS);
2457
2458                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2459         }
2460 }
2461
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2464 {
2465         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466                 u32 nvaccess = tr32(NVRAM_ACCESS);
2467
2468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2469         }
2470 }
2471
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473                                         u32 offset, u32 *val)
2474 {
2475         u32 tmp;
2476         int i;
2477
2478         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479                 return -EINVAL;
2480
2481         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482                                         EEPROM_ADDR_DEVID_MASK |
2483                                         EEPROM_ADDR_READ);
2484         tw32(GRC_EEPROM_ADDR,
2485              tmp |
2486              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488               EEPROM_ADDR_ADDR_MASK) |
2489              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2490
2491         for (i = 0; i < 1000; i++) {
2492                 tmp = tr32(GRC_EEPROM_ADDR);
2493
2494                 if (tmp & EEPROM_ADDR_COMPLETE)
2495                         break;
2496                 msleep(1);
2497         }
2498         if (!(tmp & EEPROM_ADDR_COMPLETE))
2499                 return -EBUSY;
2500
2501         tmp = tr32(GRC_EEPROM_DATA);
2502
2503         /*
2504          * The data will always be opposite the native endian
2505          * format.  Perform a blind byteswap to compensate.
2506          */
2507         *val = swab32(tmp);
2508
2509         return 0;
2510 }
2511
2512 #define NVRAM_CMD_TIMEOUT 10000
2513
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2515 {
2516         int i;
2517
2518         tw32(NVRAM_CMD, nvram_cmd);
2519         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520                 udelay(10);
2521                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522                         udelay(10);
2523                         break;
2524                 }
2525         }
2526
2527         if (i == NVRAM_CMD_TIMEOUT)
2528                 return -EBUSY;
2529
2530         return 0;
2531 }
2532
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2534 {
2535         if (tg3_flag(tp, NVRAM) &&
2536             tg3_flag(tp, NVRAM_BUFFERED) &&
2537             tg3_flag(tp, FLASH) &&
2538             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539             (tp->nvram_jedecnum == JEDEC_ATMEL))
2540
2541                 addr = ((addr / tp->nvram_pagesize) <<
2542                         ATMEL_AT45DB0X1B_PAGE_POS) +
2543                        (addr % tp->nvram_pagesize);
2544
2545         return addr;
2546 }
2547
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2549 {
2550         if (tg3_flag(tp, NVRAM) &&
2551             tg3_flag(tp, NVRAM_BUFFERED) &&
2552             tg3_flag(tp, FLASH) &&
2553             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554             (tp->nvram_jedecnum == JEDEC_ATMEL))
2555
2556                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557                         tp->nvram_pagesize) +
2558                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2559
2560         return addr;
2561 }
2562
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564  * the byteswapping settings for all other register accesses.
2565  * tg3 devices are BE devices, so on a BE machine, the data
2566  * returned will be exactly as it is seen in NVRAM.  On a LE
2567  * machine, the 32-bit value will be byteswapped.
2568  */
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2570 {
2571         int ret;
2572
2573         if (!tg3_flag(tp, NVRAM))
2574                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2575
2576         offset = tg3_nvram_phys_addr(tp, offset);
2577
2578         if (offset > NVRAM_ADDR_MSK)
2579                 return -EINVAL;
2580
2581         ret = tg3_nvram_lock(tp);
2582         if (ret)
2583                 return ret;
2584
2585         tg3_enable_nvram_access(tp);
2586
2587         tw32(NVRAM_ADDR, offset);
2588         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2590
2591         if (ret == 0)
2592                 *val = tr32(NVRAM_RDDATA);
2593
2594         tg3_disable_nvram_access(tp);
2595
2596         tg3_nvram_unlock(tp);
2597
2598         return ret;
2599 }
2600
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2603 {
2604         u32 v;
2605         int res = tg3_nvram_read(tp, offset, &v);
2606         if (!res)
2607                 *val = cpu_to_be32(v);
2608         return res;
2609 }
2610
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2613 {
2614         u32 addr_high, addr_low;
2615         int i;
2616
2617         addr_high = ((tp->dev->dev_addr[0] << 8) |
2618                      tp->dev->dev_addr[1]);
2619         addr_low = ((tp->dev->dev_addr[2] << 24) |
2620                     (tp->dev->dev_addr[3] << 16) |
2621                     (tp->dev->dev_addr[4] <<  8) |
2622                     (tp->dev->dev_addr[5] <<  0));
2623         for (i = 0; i < 4; i++) {
2624                 if (i == 1 && skip_mac_1)
2625                         continue;
2626                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632                 for (i = 0; i < 12; i++) {
2633                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2635                 }
2636         }
2637
2638         addr_high = (tp->dev->dev_addr[0] +
2639                      tp->dev->dev_addr[1] +
2640                      tp->dev->dev_addr[2] +
2641                      tp->dev->dev_addr[3] +
2642                      tp->dev->dev_addr[4] +
2643                      tp->dev->dev_addr[5]) &
2644                 TX_BACKOFF_SEED_MASK;
2645         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2646 }
2647
2648 static void tg3_enable_register_access(struct tg3 *tp)
2649 {
2650         /*
2651          * Make sure register accesses (indirect or otherwise) will function
2652          * correctly.
2653          */
2654         pci_write_config_dword(tp->pdev,
2655                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2656 }
2657
2658 static int tg3_power_up(struct tg3 *tp)
2659 {
2660         tg3_enable_register_access(tp);
2661
2662         pci_set_power_state(tp->pdev, PCI_D0);
2663
2664         /* Switch out of Vaux if it is a NIC */
2665         tg3_pwrsrc_switch_to_vmain(tp);
2666
2667         return 0;
2668 }
2669
2670 static int tg3_power_down_prepare(struct tg3 *tp)
2671 {
2672         u32 misc_host_ctrl;
2673         bool device_should_wake, do_low_power;
2674
2675         tg3_enable_register_access(tp);
2676
2677         /* Restore the CLKREQ setting. */
2678         if (tg3_flag(tp, CLKREQ_BUG)) {
2679                 u16 lnkctl;
2680
2681                 pci_read_config_word(tp->pdev,
2682                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2683                                      &lnkctl);
2684                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2685                 pci_write_config_word(tp->pdev,
2686                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2687                                       lnkctl);
2688         }
2689
2690         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2691         tw32(TG3PCI_MISC_HOST_CTRL,
2692              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2693
2694         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2695                              tg3_flag(tp, WOL_ENABLE);
2696
2697         if (tg3_flag(tp, USE_PHYLIB)) {
2698                 do_low_power = false;
2699                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2700                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2701                         struct phy_device *phydev;
2702                         u32 phyid, advertising;
2703
2704                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2705
2706                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2707
2708                         tp->link_config.orig_speed = phydev->speed;
2709                         tp->link_config.orig_duplex = phydev->duplex;
2710                         tp->link_config.orig_autoneg = phydev->autoneg;
2711                         tp->link_config.orig_advertising = phydev->advertising;
2712
2713                         advertising = ADVERTISED_TP |
2714                                       ADVERTISED_Pause |
2715                                       ADVERTISED_Autoneg |
2716                                       ADVERTISED_10baseT_Half;
2717
2718                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2719                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2720                                         advertising |=
2721                                                 ADVERTISED_100baseT_Half |
2722                                                 ADVERTISED_100baseT_Full |
2723                                                 ADVERTISED_10baseT_Full;
2724                                 else
2725                                         advertising |= ADVERTISED_10baseT_Full;
2726                         }
2727
2728                         phydev->advertising = advertising;
2729
2730                         phy_start_aneg(phydev);
2731
2732                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2733                         if (phyid != PHY_ID_BCMAC131) {
2734                                 phyid &= PHY_BCM_OUI_MASK;
2735                                 if (phyid == PHY_BCM_OUI_1 ||
2736                                     phyid == PHY_BCM_OUI_2 ||
2737                                     phyid == PHY_BCM_OUI_3)
2738                                         do_low_power = true;
2739                         }
2740                 }
2741         } else {
2742                 do_low_power = true;
2743
2744                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2745                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2746                         tp->link_config.orig_speed = tp->link_config.speed;
2747                         tp->link_config.orig_duplex = tp->link_config.duplex;
2748                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2749                 }
2750
2751                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2752                         tp->link_config.speed = SPEED_10;
2753                         tp->link_config.duplex = DUPLEX_HALF;
2754                         tp->link_config.autoneg = AUTONEG_ENABLE;
2755                         tg3_setup_phy(tp, 0);
2756                 }
2757         }
2758
2759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2760                 u32 val;
2761
2762                 val = tr32(GRC_VCPU_EXT_CTRL);
2763                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2764         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2765                 int i;
2766                 u32 val;
2767
2768                 for (i = 0; i < 200; i++) {
2769                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2770                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2771                                 break;
2772                         msleep(1);
2773                 }
2774         }
2775         if (tg3_flag(tp, WOL_CAP))
2776                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2777                                                      WOL_DRV_STATE_SHUTDOWN |
2778                                                      WOL_DRV_WOL |
2779                                                      WOL_SET_MAGIC_PKT);
2780
2781         if (device_should_wake) {
2782                 u32 mac_mode;
2783
2784                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2785                         if (do_low_power &&
2786                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2787                                 tg3_phy_auxctl_write(tp,
2788                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2789                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2790                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2791                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2792                                 udelay(40);
2793                         }
2794
2795                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2796                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2797                         else
2798                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2799
2800                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2801                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2802                             ASIC_REV_5700) {
2803                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2804                                              SPEED_100 : SPEED_10;
2805                                 if (tg3_5700_link_polarity(tp, speed))
2806                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2807                                 else
2808                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2809                         }
2810                 } else {
2811                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2812                 }
2813
2814                 if (!tg3_flag(tp, 5750_PLUS))
2815                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2816
2817                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2818                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2819                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2820                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2821
2822                 if (tg3_flag(tp, ENABLE_APE))
2823                         mac_mode |= MAC_MODE_APE_TX_EN |
2824                                     MAC_MODE_APE_RX_EN |
2825                                     MAC_MODE_TDE_ENABLE;
2826
2827                 tw32_f(MAC_MODE, mac_mode);
2828                 udelay(100);
2829
2830                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2831                 udelay(10);
2832         }
2833
2834         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2835             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2836              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2837                 u32 base_val;
2838
2839                 base_val = tp->pci_clock_ctrl;
2840                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2841                              CLOCK_CTRL_TXCLK_DISABLE);
2842
2843                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2844                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2845         } else if (tg3_flag(tp, 5780_CLASS) ||
2846                    tg3_flag(tp, CPMU_PRESENT) ||
2847                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2848                 /* do nothing */
2849         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2850                 u32 newbits1, newbits2;
2851
2852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2853                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2854                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2855                                     CLOCK_CTRL_TXCLK_DISABLE |
2856                                     CLOCK_CTRL_ALTCLK);
2857                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2858                 } else if (tg3_flag(tp, 5705_PLUS)) {
2859                         newbits1 = CLOCK_CTRL_625_CORE;
2860                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2861                 } else {
2862                         newbits1 = CLOCK_CTRL_ALTCLK;
2863                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2864                 }
2865
2866                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2867                             40);
2868
2869                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2870                             40);
2871
2872                 if (!tg3_flag(tp, 5705_PLUS)) {
2873                         u32 newbits3;
2874
2875                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2876                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2877                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2878                                             CLOCK_CTRL_TXCLK_DISABLE |
2879                                             CLOCK_CTRL_44MHZ_CORE);
2880                         } else {
2881                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2882                         }
2883
2884                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2885                                     tp->pci_clock_ctrl | newbits3, 40);
2886                 }
2887         }
2888
2889         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2890                 tg3_power_down_phy(tp, do_low_power);
2891
2892         tg3_frob_aux_power(tp);
2893
2894         /* Workaround for unstable PLL clock */
2895         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2896             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2897                 u32 val = tr32(0x7d00);
2898
2899                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2900                 tw32(0x7d00, val);
2901                 if (!tg3_flag(tp, ENABLE_ASF)) {
2902                         int err;
2903
2904                         err = tg3_nvram_lock(tp);
2905                         tg3_halt_cpu(tp, RX_CPU_BASE);
2906                         if (!err)
2907                                 tg3_nvram_unlock(tp);
2908                 }
2909         }
2910
2911         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2912
2913         return 0;
2914 }
2915
2916 static void tg3_power_down(struct tg3 *tp)
2917 {
2918         tg3_power_down_prepare(tp);
2919
2920         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2921         pci_set_power_state(tp->pdev, PCI_D3hot);
2922 }
2923
2924 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2925 {
2926         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2927         case MII_TG3_AUX_STAT_10HALF:
2928                 *speed = SPEED_10;
2929                 *duplex = DUPLEX_HALF;
2930                 break;
2931
2932         case MII_TG3_AUX_STAT_10FULL:
2933                 *speed = SPEED_10;
2934                 *duplex = DUPLEX_FULL;
2935                 break;
2936
2937         case MII_TG3_AUX_STAT_100HALF:
2938                 *speed = SPEED_100;
2939                 *duplex = DUPLEX_HALF;
2940                 break;
2941
2942         case MII_TG3_AUX_STAT_100FULL:
2943                 *speed = SPEED_100;
2944                 *duplex = DUPLEX_FULL;
2945                 break;
2946
2947         case MII_TG3_AUX_STAT_1000HALF:
2948                 *speed = SPEED_1000;
2949                 *duplex = DUPLEX_HALF;
2950                 break;
2951
2952         case MII_TG3_AUX_STAT_1000FULL:
2953                 *speed = SPEED_1000;
2954                 *duplex = DUPLEX_FULL;
2955                 break;
2956
2957         default:
2958                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2959                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2960                                  SPEED_10;
2961                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2962                                   DUPLEX_HALF;
2963                         break;
2964                 }
2965                 *speed = SPEED_INVALID;
2966                 *duplex = DUPLEX_INVALID;
2967                 break;
2968         }
2969 }
2970
2971 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2972 {
2973         int err = 0;
2974         u32 val, new_adv;
2975
2976         new_adv = ADVERTISE_CSMA;
2977         if (advertise & ADVERTISED_10baseT_Half)
2978                 new_adv |= ADVERTISE_10HALF;
2979         if (advertise & ADVERTISED_10baseT_Full)
2980                 new_adv |= ADVERTISE_10FULL;
2981         if (advertise & ADVERTISED_100baseT_Half)
2982                 new_adv |= ADVERTISE_100HALF;
2983         if (advertise & ADVERTISED_100baseT_Full)
2984                 new_adv |= ADVERTISE_100FULL;
2985
2986         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2987
2988         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989         if (err)
2990                 goto done;
2991
2992         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2993                 goto done;
2994
2995         new_adv = 0;
2996         if (advertise & ADVERTISED_1000baseT_Half)
2997                 new_adv |= ADVERTISE_1000HALF;
2998         if (advertise & ADVERTISED_1000baseT_Full)
2999                 new_adv |= ADVERTISE_1000FULL;
3000
3001         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3002             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3003                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3004
3005         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3006         if (err)
3007                 goto done;
3008
3009         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3010                 goto done;
3011
3012         tw32(TG3_CPMU_EEE_MODE,
3013              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3014
3015         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3016         if (!err) {
3017                 u32 err2;
3018
3019                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3020                 case ASIC_REV_5717:
3021                 case ASIC_REV_57765:
3022                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3023                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3024                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3025                         /* Fall through */
3026                 case ASIC_REV_5719:
3027                         val = MII_TG3_DSP_TAP26_ALNOKO |
3028                               MII_TG3_DSP_TAP26_RMRXSTO |
3029                               MII_TG3_DSP_TAP26_OPCSINPT;
3030                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3031                 }
3032
3033                 val = 0;
3034                 /* Advertise 100-BaseTX EEE ability */
3035                 if (advertise & ADVERTISED_100baseT_Full)
3036                         val |= MDIO_AN_EEE_ADV_100TX;
3037                 /* Advertise 1000-BaseT EEE ability */
3038                 if (advertise & ADVERTISED_1000baseT_Full)
3039                         val |= MDIO_AN_EEE_ADV_1000T;
3040                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3041
3042                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3043                 if (!err)
3044                         err = err2;
3045         }
3046
3047 done:
3048         return err;
3049 }
3050
3051 static void tg3_phy_copper_begin(struct tg3 *tp)
3052 {
3053         u32 new_adv;
3054         int i;
3055
3056         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3057                 new_adv = ADVERTISED_10baseT_Half |
3058                           ADVERTISED_10baseT_Full;
3059                 if (tg3_flag(tp, WOL_SPEED_100MB))
3060                         new_adv |= ADVERTISED_100baseT_Half |
3061                                    ADVERTISED_100baseT_Full;
3062
3063                 tg3_phy_autoneg_cfg(tp, new_adv,
3064                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3065         } else if (tp->link_config.speed == SPEED_INVALID) {
3066                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3067                         tp->link_config.advertising &=
3068                                 ~(ADVERTISED_1000baseT_Half |
3069                                   ADVERTISED_1000baseT_Full);
3070
3071                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3072                                     tp->link_config.flowctrl);
3073         } else {
3074                 /* Asking for a specific link mode. */
3075                 if (tp->link_config.speed == SPEED_1000) {
3076                         if (tp->link_config.duplex == DUPLEX_FULL)
3077                                 new_adv = ADVERTISED_1000baseT_Full;
3078                         else
3079                                 new_adv = ADVERTISED_1000baseT_Half;
3080                 } else if (tp->link_config.speed == SPEED_100) {
3081                         if (tp->link_config.duplex == DUPLEX_FULL)
3082                                 new_adv = ADVERTISED_100baseT_Full;
3083                         else
3084                                 new_adv = ADVERTISED_100baseT_Half;
3085                 } else {
3086                         if (tp->link_config.duplex == DUPLEX_FULL)
3087                                 new_adv = ADVERTISED_10baseT_Full;
3088                         else
3089                                 new_adv = ADVERTISED_10baseT_Half;
3090                 }
3091
3092                 tg3_phy_autoneg_cfg(tp, new_adv,
3093                                     tp->link_config.flowctrl);
3094         }
3095
3096         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3097             tp->link_config.speed != SPEED_INVALID) {
3098                 u32 bmcr, orig_bmcr;
3099
3100                 tp->link_config.active_speed = tp->link_config.speed;
3101                 tp->link_config.active_duplex = tp->link_config.duplex;
3102
3103                 bmcr = 0;
3104                 switch (tp->link_config.speed) {
3105                 default:
3106                 case SPEED_10:
3107                         break;
3108
3109                 case SPEED_100:
3110                         bmcr |= BMCR_SPEED100;
3111                         break;
3112
3113                 case SPEED_1000:
3114                         bmcr |= BMCR_SPEED1000;
3115                         break;
3116                 }
3117
3118                 if (tp->link_config.duplex == DUPLEX_FULL)
3119                         bmcr |= BMCR_FULLDPLX;
3120
3121                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3122                     (bmcr != orig_bmcr)) {
3123                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3124                         for (i = 0; i < 1500; i++) {
3125                                 u32 tmp;
3126
3127                                 udelay(10);
3128                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3129                                     tg3_readphy(tp, MII_BMSR, &tmp))
3130                                         continue;
3131                                 if (!(tmp & BMSR_LSTATUS)) {
3132                                         udelay(40);
3133                                         break;
3134                                 }
3135                         }
3136                         tg3_writephy(tp, MII_BMCR, bmcr);
3137                         udelay(40);
3138                 }
3139         } else {
3140                 tg3_writephy(tp, MII_BMCR,
3141                              BMCR_ANENABLE | BMCR_ANRESTART);
3142         }
3143 }
3144
3145 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3146 {
3147         int err;
3148
3149         /* Turn off tap power management. */
3150         /* Set Extended packet length bit */
3151         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3152
3153         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3154         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3155         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3156         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3157         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3158
3159         udelay(40);
3160
3161         return err;
3162 }
3163
3164 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3165 {
3166         u32 adv_reg, all_mask = 0;
3167
3168         if (mask & ADVERTISED_10baseT_Half)
3169                 all_mask |= ADVERTISE_10HALF;
3170         if (mask & ADVERTISED_10baseT_Full)
3171                 all_mask |= ADVERTISE_10FULL;
3172         if (mask & ADVERTISED_100baseT_Half)
3173                 all_mask |= ADVERTISE_100HALF;
3174         if (mask & ADVERTISED_100baseT_Full)
3175                 all_mask |= ADVERTISE_100FULL;
3176
3177         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3178                 return 0;
3179
3180         if ((adv_reg & all_mask) != all_mask)
3181                 return 0;
3182         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3183                 u32 tg3_ctrl;
3184
3185                 all_mask = 0;
3186                 if (mask & ADVERTISED_1000baseT_Half)
3187                         all_mask |= ADVERTISE_1000HALF;
3188                 if (mask & ADVERTISED_1000baseT_Full)
3189                         all_mask |= ADVERTISE_1000FULL;
3190
3191                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3192                         return 0;
3193
3194                 if ((tg3_ctrl & all_mask) != all_mask)
3195                         return 0;
3196         }
3197         return 1;
3198 }
3199
3200 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3201 {
3202         u32 curadv, reqadv;
3203
3204         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3205                 return 1;
3206
3207         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3208         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3209
3210         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3211                 if (curadv != reqadv)
3212                         return 0;
3213
3214                 if (tg3_flag(tp, PAUSE_AUTONEG))
3215                         tg3_readphy(tp, MII_LPA, rmtadv);
3216         } else {
3217                 /* Reprogram the advertisement register, even if it
3218                  * does not affect the current link.  If the link
3219                  * gets renegotiated in the future, we can save an
3220                  * additional renegotiation cycle by advertising
3221                  * it correctly in the first place.
3222                  */
3223                 if (curadv != reqadv) {
3224                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3225                                      ADVERTISE_PAUSE_ASYM);
3226                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3227                 }
3228         }
3229
3230         return 1;
3231 }
3232
3233 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3234 {
3235         int current_link_up;
3236         u32 bmsr, val;
3237         u32 lcl_adv, rmt_adv;
3238         u16 current_speed;
3239         u8 current_duplex;
3240         int i, err;
3241
3242         tw32(MAC_EVENT, 0);
3243
3244         tw32_f(MAC_STATUS,
3245              (MAC_STATUS_SYNC_CHANGED |
3246               MAC_STATUS_CFG_CHANGED |
3247               MAC_STATUS_MI_COMPLETION |
3248               MAC_STATUS_LNKSTATE_CHANGED));
3249         udelay(40);
3250
3251         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3252                 tw32_f(MAC_MI_MODE,
3253                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3254                 udelay(80);
3255         }
3256
3257         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3258
3259         /* Some third-party PHYs need to be reset on link going
3260          * down.
3261          */
3262         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3263              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3264              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3265             netif_carrier_ok(tp->dev)) {
3266                 tg3_readphy(tp, MII_BMSR, &bmsr);
3267                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3268                     !(bmsr & BMSR_LSTATUS))
3269                         force_reset = 1;
3270         }
3271         if (force_reset)
3272                 tg3_phy_reset(tp);
3273
3274         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3275                 tg3_readphy(tp, MII_BMSR, &bmsr);
3276                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3277                     !tg3_flag(tp, INIT_COMPLETE))
3278                         bmsr = 0;
3279
3280                 if (!(bmsr & BMSR_LSTATUS)) {
3281                         err = tg3_init_5401phy_dsp(tp);
3282                         if (err)
3283                                 return err;
3284
3285                         tg3_readphy(tp, MII_BMSR, &bmsr);
3286                         for (i = 0; i < 1000; i++) {
3287                                 udelay(10);
3288                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3289                                     (bmsr & BMSR_LSTATUS)) {
3290                                         udelay(40);
3291                                         break;
3292                                 }
3293                         }
3294
3295                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3296                             TG3_PHY_REV_BCM5401_B0 &&
3297                             !(bmsr & BMSR_LSTATUS) &&
3298                             tp->link_config.active_speed == SPEED_1000) {
3299                                 err = tg3_phy_reset(tp);
3300                                 if (!err)
3301                                         err = tg3_init_5401phy_dsp(tp);
3302                                 if (err)
3303                                         return err;
3304                         }
3305                 }
3306         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3307                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3308                 /* 5701 {A0,B0} CRC bug workaround */
3309                 tg3_writephy(tp, 0x15, 0x0a75);
3310                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3311                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3312                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3313         }
3314
3315         /* Clear pending interrupts... */
3316         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3317         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3318
3319         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3320                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3321         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3322                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3323
3324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3326                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3327                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3328                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3329                 else
3330                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3331         }
3332
3333         current_link_up = 0;
3334         current_speed = SPEED_INVALID;
3335         current_duplex = DUPLEX_INVALID;
3336
3337         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3338                 err = tg3_phy_auxctl_read(tp,
3339                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3340                                           &val);
3341                 if (!err && !(val & (1 << 10))) {
3342                         tg3_phy_auxctl_write(tp,
3343                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3344                                              val | (1 << 10));
3345                         goto relink;
3346                 }
3347         }
3348
3349         bmsr = 0;
3350         for (i = 0; i < 100; i++) {
3351                 tg3_readphy(tp, MII_BMSR, &bmsr);
3352                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3353                     (bmsr & BMSR_LSTATUS))
3354                         break;
3355                 udelay(40);
3356         }
3357
3358         if (bmsr & BMSR_LSTATUS) {
3359                 u32 aux_stat, bmcr;
3360
3361                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3362                 for (i = 0; i < 2000; i++) {
3363                         udelay(10);
3364                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3365                             aux_stat)
3366                                 break;
3367                 }
3368
3369                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3370                                              &current_speed,
3371                                              &current_duplex);
3372
3373                 bmcr = 0;
3374                 for (i = 0; i < 200; i++) {
3375                         tg3_readphy(tp, MII_BMCR, &bmcr);
3376                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3377                                 continue;
3378                         if (bmcr && bmcr != 0x7fff)
3379                                 break;
3380                         udelay(10);
3381                 }
3382
3383                 lcl_adv = 0;
3384                 rmt_adv = 0;
3385
3386                 tp->link_config.active_speed = current_speed;
3387                 tp->link_config.active_duplex = current_duplex;
3388
3389                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3390                         if ((bmcr & BMCR_ANENABLE) &&
3391                             tg3_copper_is_advertising_all(tp,
3392                                                 tp->link_config.advertising)) {
3393                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3394                                                                   &rmt_adv))
3395                                         current_link_up = 1;
3396                         }
3397                 } else {
3398                         if (!(bmcr & BMCR_ANENABLE) &&
3399                             tp->link_config.speed == current_speed &&
3400                             tp->link_config.duplex == current_duplex &&
3401                             tp->link_config.flowctrl ==
3402                             tp->link_config.active_flowctrl) {
3403                                 current_link_up = 1;
3404                         }
3405                 }
3406
3407                 if (current_link_up == 1 &&
3408                     tp->link_config.active_duplex == DUPLEX_FULL)
3409                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3410         }
3411
3412 relink:
3413         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3414                 tg3_phy_copper_begin(tp);
3415
3416                 tg3_readphy(tp, MII_BMSR, &bmsr);
3417                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3418                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3419                         current_link_up = 1;
3420         }
3421
3422         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3423         if (current_link_up == 1) {
3424                 if (tp->link_config.active_speed == SPEED_100 ||
3425                     tp->link_config.active_speed == SPEED_10)
3426                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3427                 else
3428                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3429         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3430                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3431         else
3432                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3433
3434         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3435         if (tp->link_config.active_duplex == DUPLEX_HALF)
3436                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3437
3438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3439                 if (current_link_up == 1 &&
3440                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3441                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3442                 else
3443                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3444         }
3445
3446         /* ??? Without this setting Netgear GA302T PHY does not
3447          * ??? send/receive packets...
3448          */
3449         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3450             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3451                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3452                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3453                 udelay(80);
3454         }
3455
3456         tw32_f(MAC_MODE, tp->mac_mode);
3457         udelay(40);
3458
3459         tg3_phy_eee_adjust(tp, current_link_up);
3460
3461         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3462                 /* Polled via timer. */
3463                 tw32_f(MAC_EVENT, 0);
3464         } else {
3465                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3466         }
3467         udelay(40);
3468
3469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3470             current_link_up == 1 &&
3471             tp->link_config.active_speed == SPEED_1000 &&
3472             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3473                 udelay(120);
3474                 tw32_f(MAC_STATUS,
3475                      (MAC_STATUS_SYNC_CHANGED |
3476                       MAC_STATUS_CFG_CHANGED));
3477                 udelay(40);
3478                 tg3_write_mem(tp,
3479                               NIC_SRAM_FIRMWARE_MBOX,
3480                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3481         }
3482
3483         /* Prevent send BD corruption. */
3484         if (tg3_flag(tp, CLKREQ_BUG)) {
3485                 u16 oldlnkctl, newlnkctl;
3486
3487                 pci_read_config_word(tp->pdev,
3488                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3489                                      &oldlnkctl);
3490                 if (tp->link_config.active_speed == SPEED_100 ||
3491                     tp->link_config.active_speed == SPEED_10)
3492                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3493                 else
3494                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3495                 if (newlnkctl != oldlnkctl)
3496                         pci_write_config_word(tp->pdev,
3497                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3498                                               newlnkctl);
3499         }
3500
3501         if (current_link_up != netif_carrier_ok(tp->dev)) {
3502                 if (current_link_up)
3503                         netif_carrier_on(tp->dev);
3504                 else
3505                         netif_carrier_off(tp->dev);
3506                 tg3_link_report(tp);
3507         }
3508
3509         return 0;
3510 }
3511
3512 struct tg3_fiber_aneginfo {
3513         int state;
3514 #define ANEG_STATE_UNKNOWN              0
3515 #define ANEG_STATE_AN_ENABLE            1
3516 #define ANEG_STATE_RESTART_INIT         2
3517 #define ANEG_STATE_RESTART              3
3518 #define ANEG_STATE_DISABLE_LINK_OK      4
3519 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3520 #define ANEG_STATE_ABILITY_DETECT       6
3521 #define ANEG_STATE_ACK_DETECT_INIT      7
3522 #define ANEG_STATE_ACK_DETECT           8
3523 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3524 #define ANEG_STATE_COMPLETE_ACK         10
3525 #define ANEG_STATE_IDLE_DETECT_INIT     11
3526 #define ANEG_STATE_IDLE_DETECT          12
3527 #define ANEG_STATE_LINK_OK              13
3528 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3529 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3530
3531         u32 flags;
3532 #define MR_AN_ENABLE            0x00000001
3533 #define MR_RESTART_AN           0x00000002
3534 #define MR_AN_COMPLETE          0x00000004
3535 #define MR_PAGE_RX              0x00000008
3536 #define MR_NP_LOADED            0x00000010
3537 #define MR_TOGGLE_TX            0x00000020
3538 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3539 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3540 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3541 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3542 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3543 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3544 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3545 #define MR_TOGGLE_RX            0x00002000
3546 #define MR_NP_RX                0x00004000
3547
3548 #define MR_LINK_OK              0x80000000
3549
3550         unsigned long link_time, cur_time;
3551
3552         u32 ability_match_cfg;
3553         int ability_match_count;
3554
3555         char ability_match, idle_match, ack_match;
3556
3557         u32 txconfig, rxconfig;
3558 #define ANEG_CFG_NP             0x00000080
3559 #define ANEG_CFG_ACK            0x00000040
3560 #define ANEG_CFG_RF2            0x00000020
3561 #define ANEG_CFG_RF1            0x00000010
3562 #define ANEG_CFG_PS2            0x00000001
3563 #define ANEG_CFG_PS1            0x00008000
3564 #define ANEG_CFG_HD             0x00004000
3565 #define ANEG_CFG_FD             0x00002000
3566 #define ANEG_CFG_INVAL          0x00001f06
3567
3568 };
3569 #define ANEG_OK         0
3570 #define ANEG_DONE       1
3571 #define ANEG_TIMER_ENAB 2
3572 #define ANEG_FAILED     -1
3573
3574 #define ANEG_STATE_SETTLE_TIME  10000
3575
3576 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3577                                    struct tg3_fiber_aneginfo *ap)
3578 {
3579         u16 flowctrl;
3580         unsigned long delta;
3581         u32 rx_cfg_reg;
3582         int ret;
3583
3584         if (ap->state == ANEG_STATE_UNKNOWN) {
3585                 ap->rxconfig = 0;
3586                 ap->link_time = 0;
3587                 ap->cur_time = 0;
3588                 ap->ability_match_cfg = 0;
3589                 ap->ability_match_count = 0;
3590                 ap->ability_match = 0;
3591                 ap->idle_match = 0;
3592                 ap->ack_match = 0;
3593         }
3594         ap->cur_time++;
3595
3596         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3597                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3598
3599                 if (rx_cfg_reg != ap->ability_match_cfg) {
3600                         ap->ability_match_cfg = rx_cfg_reg;
3601                         ap->ability_match = 0;
3602                         ap->ability_match_count = 0;
3603                 } else {
3604                         if (++ap->ability_match_count > 1) {
3605                                 ap->ability_match = 1;
3606                                 ap->ability_match_cfg = rx_cfg_reg;
3607                         }
3608                 }
3609                 if (rx_cfg_reg & ANEG_CFG_ACK)
3610                         ap->ack_match = 1;
3611                 else
3612                         ap->ack_match = 0;
3613
3614                 ap->idle_match = 0;
3615         } else {
3616                 ap->idle_match = 1;
3617                 ap->ability_match_cfg = 0;
3618                 ap->ability_match_count = 0;
3619                 ap->ability_match = 0;
3620                 ap->ack_match = 0;
3621
3622                 rx_cfg_reg = 0;
3623         }
3624
3625         ap->rxconfig = rx_cfg_reg;
3626         ret = ANEG_OK;
3627
3628         switch (ap->state) {
3629         case ANEG_STATE_UNKNOWN:
3630                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3631                         ap->state = ANEG_STATE_AN_ENABLE;
3632
3633                 /* fallthru */
3634         case ANEG_STATE_AN_ENABLE:
3635                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3636                 if (ap->flags & MR_AN_ENABLE) {
3637                         ap->link_time = 0;
3638                         ap->cur_time = 0;
3639                         ap->ability_match_cfg = 0;
3640                         ap->ability_match_count = 0;
3641                         ap->ability_match = 0;
3642                         ap->idle_match = 0;
3643                         ap->ack_match = 0;
3644
3645                         ap->state = ANEG_STATE_RESTART_INIT;
3646                 } else {
3647                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3648                 }
3649                 break;
3650
3651         case ANEG_STATE_RESTART_INIT:
3652                 ap->link_time = ap->cur_time;
3653                 ap->flags &= ~(MR_NP_LOADED);
3654                 ap->txconfig = 0;
3655                 tw32(MAC_TX_AUTO_NEG, 0);
3656                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3657                 tw32_f(MAC_MODE, tp->mac_mode);
3658                 udelay(40);
3659
3660                 ret = ANEG_TIMER_ENAB;
3661                 ap->state = ANEG_STATE_RESTART;
3662
3663                 /* fallthru */
3664         case ANEG_STATE_RESTART:
3665                 delta = ap->cur_time - ap->link_time;
3666                 if (delta > ANEG_STATE_SETTLE_TIME)
3667                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3668                 else
3669                         ret = ANEG_TIMER_ENAB;
3670                 break;
3671
3672         case ANEG_STATE_DISABLE_LINK_OK:
3673                 ret = ANEG_DONE;
3674                 break;
3675
3676         case ANEG_STATE_ABILITY_DETECT_INIT:
3677                 ap->flags &= ~(MR_TOGGLE_TX);
3678                 ap->txconfig = ANEG_CFG_FD;
3679                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3680                 if (flowctrl & ADVERTISE_1000XPAUSE)
3681                         ap->txconfig |= ANEG_CFG_PS1;
3682                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3683                         ap->txconfig |= ANEG_CFG_PS2;
3684                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3685                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3686                 tw32_f(MAC_MODE, tp->mac_mode);
3687                 udelay(40);
3688
3689                 ap->state = ANEG_STATE_ABILITY_DETECT;
3690                 break;
3691
3692         case ANEG_STATE_ABILITY_DETECT:
3693                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3694                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3695                 break;
3696
3697         case ANEG_STATE_ACK_DETECT_INIT:
3698                 ap->txconfig |= ANEG_CFG_ACK;
3699                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3700                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3701                 tw32_f(MAC_MODE, tp->mac_mode);
3702                 udelay(40);
3703
3704                 ap->state = ANEG_STATE_ACK_DETECT;
3705
3706                 /* fallthru */
3707         case ANEG_STATE_ACK_DETECT:
3708                 if (ap->ack_match != 0) {
3709                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3710                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3711                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3712                         } else {
3713                                 ap->state = ANEG_STATE_AN_ENABLE;
3714                         }
3715                 } else if (ap->ability_match != 0 &&
3716                            ap->rxconfig == 0) {
3717                         ap->state = ANEG_STATE_AN_ENABLE;
3718                 }
3719                 break;
3720
3721         case ANEG_STATE_COMPLETE_ACK_INIT:
3722                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3723                         ret = ANEG_FAILED;
3724                         break;
3725                 }
3726                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3727                                MR_LP_ADV_HALF_DUPLEX |
3728                                MR_LP_ADV_SYM_PAUSE |
3729                                MR_LP_ADV_ASYM_PAUSE |
3730                                MR_LP_ADV_REMOTE_FAULT1 |
3731                                MR_LP_ADV_REMOTE_FAULT2 |
3732                                MR_LP_ADV_NEXT_PAGE |
3733                                MR_TOGGLE_RX |
3734                                MR_NP_RX);
3735                 if (ap->rxconfig & ANEG_CFG_FD)
3736                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3737                 if (ap->rxconfig & ANEG_CFG_HD)
3738                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3739                 if (ap->rxconfig & ANEG_CFG_PS1)
3740                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3741                 if (ap->rxconfig & ANEG_CFG_PS2)
3742                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3743                 if (ap->rxconfig & ANEG_CFG_RF1)
3744                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3745                 if (ap->rxconfig & ANEG_CFG_RF2)
3746                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3747                 if (ap->rxconfig & ANEG_CFG_NP)
3748                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3749
3750                 ap->link_time = ap->cur_time;
3751
3752                 ap->flags ^= (MR_TOGGLE_TX);
3753                 if (ap->rxconfig & 0x0008)
3754                         ap->flags |= MR_TOGGLE_RX;
3755                 if (ap->rxconfig & ANEG_CFG_NP)
3756                         ap->flags |= MR_NP_RX;
3757                 ap->flags |= MR_PAGE_RX;
3758
3759                 ap->state = ANEG_STATE_COMPLETE_ACK;
3760                 ret = ANEG_TIMER_ENAB;
3761                 break;
3762
3763         case ANEG_STATE_COMPLETE_ACK:
3764                 if (ap->ability_match != 0 &&
3765                     ap->rxconfig == 0) {
3766                         ap->state = ANEG_STATE_AN_ENABLE;
3767                         break;
3768                 }
3769                 delta = ap->cur_time - ap->link_time;
3770                 if (delta > ANEG_STATE_SETTLE_TIME) {
3771                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3772                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3773                         } else {
3774                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3775                                     !(ap->flags & MR_NP_RX)) {
3776                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3777                                 } else {
3778                                         ret = ANEG_FAILED;
3779                                 }
3780                         }
3781                 }
3782                 break;
3783
3784         case ANEG_STATE_IDLE_DETECT_INIT:
3785                 ap->link_time = ap->cur_time;
3786                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3787                 tw32_f(MAC_MODE, tp->mac_mode);
3788                 udelay(40);
3789
3790                 ap->state = ANEG_STATE_IDLE_DETECT;
3791                 ret = ANEG_TIMER_ENAB;
3792                 break;
3793
3794         case ANEG_STATE_IDLE_DETECT:
3795                 if (ap->ability_match != 0 &&
3796                     ap->rxconfig == 0) {
3797                         ap->state = ANEG_STATE_AN_ENABLE;
3798                         break;
3799                 }
3800                 delta = ap->cur_time - ap->link_time;
3801                 if (delta > ANEG_STATE_SETTLE_TIME) {
3802                         /* XXX another gem from the Broadcom driver :( */
3803                         ap->state = ANEG_STATE_LINK_OK;
3804                 }
3805                 break;
3806
3807         case ANEG_STATE_LINK_OK:
3808                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3809                 ret = ANEG_DONE;
3810                 break;
3811
3812         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3813                 /* ??? unimplemented */
3814                 break;
3815
3816         case ANEG_STATE_NEXT_PAGE_WAIT:
3817                 /* ??? unimplemented */
3818                 break;
3819
3820         default:
3821                 ret = ANEG_FAILED;
3822                 break;
3823         }
3824
3825         return ret;
3826 }
3827
3828 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3829 {
3830         int res = 0;
3831         struct tg3_fiber_aneginfo aninfo;
3832         int status = ANEG_FAILED;
3833         unsigned int tick;
3834         u32 tmp;
3835
3836         tw32_f(MAC_TX_AUTO_NEG, 0);
3837
3838         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3839         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3840         udelay(40);
3841
3842         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3843         udelay(40);
3844
3845         memset(&aninfo, 0, sizeof(aninfo));
3846         aninfo.flags |= MR_AN_ENABLE;
3847         aninfo.state = ANEG_STATE_UNKNOWN;
3848         aninfo.cur_time = 0;
3849         tick = 0;
3850         while (++tick < 195000) {
3851                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3852                 if (status == ANEG_DONE || status == ANEG_FAILED)
3853                         break;
3854
3855                 udelay(1);
3856         }
3857
3858         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3859         tw32_f(MAC_MODE, tp->mac_mode);
3860         udelay(40);
3861
3862         *txflags = aninfo.txconfig;
3863         *rxflags = aninfo.flags;
3864
3865         if (status == ANEG_DONE &&
3866             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3867                              MR_LP_ADV_FULL_DUPLEX)))
3868                 res = 1;
3869
3870         return res;
3871 }
3872
3873 static void tg3_init_bcm8002(struct tg3 *tp)
3874 {
3875         u32 mac_status = tr32(MAC_STATUS);
3876         int i;
3877
3878         /* Reset when initting first time or we have a link. */
3879         if (tg3_flag(tp, INIT_COMPLETE) &&
3880             !(mac_status & MAC_STATUS_PCS_SYNCED))
3881                 return;
3882
3883         /* Set PLL lock range. */
3884         tg3_writephy(tp, 0x16, 0x8007);
3885
3886         /* SW reset */
3887         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3888
3889         /* Wait for reset to complete. */
3890         /* XXX schedule_timeout() ... */
3891         for (i = 0; i < 500; i++)
3892                 udelay(10);
3893
3894         /* Config mode; select PMA/Ch 1 regs. */
3895         tg3_writephy(tp, 0x10, 0x8411);
3896
3897         /* Enable auto-lock and comdet, select txclk for tx. */
3898         tg3_writephy(tp, 0x11, 0x0a10);
3899
3900         tg3_writephy(tp, 0x18, 0x00a0);
3901         tg3_writephy(tp, 0x16, 0x41ff);
3902
3903         /* Assert and deassert POR. */
3904         tg3_writephy(tp, 0x13, 0x0400);
3905         udelay(40);
3906         tg3_writephy(tp, 0x13, 0x0000);
3907
3908         tg3_writephy(tp, 0x11, 0x0a50);
3909         udelay(40);
3910         tg3_writephy(tp, 0x11, 0x0a10);
3911
3912         /* Wait for signal to stabilize */
3913         /* XXX schedule_timeout() ... */
3914         for (i = 0; i < 15000; i++)
3915                 udelay(10);
3916
3917         /* Deselect the channel register so we can read the PHYID
3918          * later.
3919          */
3920         tg3_writephy(tp, 0x10, 0x8011);
3921 }
3922
3923 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3924 {
3925         u16 flowctrl;
3926         u32 sg_dig_ctrl, sg_dig_status;
3927         u32 serdes_cfg, expected_sg_dig_ctrl;
3928         int workaround, port_a;
3929         int current_link_up;
3930
3931         serdes_cfg = 0;
3932         expected_sg_dig_ctrl = 0;
3933         workaround = 0;
3934         port_a = 1;
3935         current_link_up = 0;
3936
3937         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3938             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3939                 workaround = 1;
3940                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3941                         port_a = 0;
3942
3943                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3944                 /* preserve bits 20-23 for voltage regulator */
3945                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3946         }
3947
3948         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3949
3950         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3951                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3952                         if (workaround) {
3953                                 u32 val = serdes_cfg;
3954
3955                                 if (port_a)
3956                                         val |= 0xc010000;
3957                                 else
3958                                         val |= 0x4010000;
3959                                 tw32_f(MAC_SERDES_CFG, val);
3960                         }
3961
3962                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3963                 }
3964                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3965                         tg3_setup_flow_control(tp, 0, 0);
3966                         current_link_up = 1;
3967                 }
3968                 goto out;
3969         }
3970
3971         /* Want auto-negotiation.  */
3972         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3973
3974         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3975         if (flowctrl & ADVERTISE_1000XPAUSE)
3976                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3977         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3978                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3979
3980         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3981                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3982                     tp->serdes_counter &&
3983                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3984                                     MAC_STATUS_RCVD_CFG)) ==
3985                      MAC_STATUS_PCS_SYNCED)) {
3986                         tp->serdes_counter--;
3987                         current_link_up = 1;
3988                         goto out;
3989                 }
3990 restart_autoneg:
3991                 if (workaround)
3992                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3993                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3994                 udelay(5);
3995                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3996
3997                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3998                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3999         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4000                                  MAC_STATUS_SIGNAL_DET)) {
4001                 sg_dig_status = tr32(SG_DIG_STATUS);
4002                 mac_status = tr32(MAC_STATUS);
4003
4004                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4005                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4006                         u32 local_adv = 0, remote_adv = 0;
4007
4008                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4009                                 local_adv |= ADVERTISE_1000XPAUSE;
4010                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4011                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4012
4013                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4014                                 remote_adv |= LPA_1000XPAUSE;
4015                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4016                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4017
4018                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4019                         current_link_up = 1;
4020                         tp->serdes_counter = 0;
4021                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4022                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4023                         if (tp->serdes_counter)
4024                                 tp->serdes_counter--;
4025                         else {
4026                                 if (workaround) {
4027                                         u32 val = serdes_cfg;
4028
4029                                         if (port_a)
4030                                                 val |= 0xc010000;
4031                                         else
4032                                                 val |= 0x4010000;
4033
4034                                         tw32_f(MAC_SERDES_CFG, val);
4035                                 }
4036
4037                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4038                                 udelay(40);
4039
4040                                 /* Link parallel detection - link is up */
4041                                 /* only if we have PCS_SYNC and not */
4042                                 /* receiving config code words */
4043                                 mac_status = tr32(MAC_STATUS);
4044                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4045                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4046                                         tg3_setup_flow_control(tp, 0, 0);
4047                                         current_link_up = 1;
4048                                         tp->phy_flags |=
4049                                                 TG3_PHYFLG_PARALLEL_DETECT;
4050                                         tp->serdes_counter =
4051                                                 SERDES_PARALLEL_DET_TIMEOUT;
4052                                 } else
4053                                         goto restart_autoneg;
4054                         }
4055                 }
4056         } else {
4057                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4059         }
4060
4061 out:
4062         return current_link_up;
4063 }
4064
4065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4066 {
4067         int current_link_up = 0;
4068
4069         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4070                 goto out;
4071
4072         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4073                 u32 txflags, rxflags;
4074                 int i;
4075
4076                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4077                         u32 local_adv = 0, remote_adv = 0;
4078
4079                         if (txflags & ANEG_CFG_PS1)
4080                                 local_adv |= ADVERTISE_1000XPAUSE;
4081                         if (txflags & ANEG_CFG_PS2)
4082                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4083
4084                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4085                                 remote_adv |= LPA_1000XPAUSE;
4086                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4087                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4088
4089                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4090
4091                         current_link_up = 1;
4092                 }
4093                 for (i = 0; i < 30; i++) {
4094                         udelay(20);
4095                         tw32_f(MAC_STATUS,
4096                                (MAC_STATUS_SYNC_CHANGED |
4097                                 MAC_STATUS_CFG_CHANGED));
4098                         udelay(40);
4099                         if ((tr32(MAC_STATUS) &
4100                              (MAC_STATUS_SYNC_CHANGED |
4101                               MAC_STATUS_CFG_CHANGED)) == 0)
4102                                 break;
4103                 }
4104
4105                 mac_status = tr32(MAC_STATUS);
4106                 if (current_link_up == 0 &&
4107                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4108                     !(mac_status & MAC_STATUS_RCVD_CFG))
4109                         current_link_up = 1;
4110         } else {
4111                 tg3_setup_flow_control(tp, 0, 0);
4112
4113                 /* Forcing 1000FD link up. */
4114                 current_link_up = 1;
4115
4116                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4117                 udelay(40);
4118
4119                 tw32_f(MAC_MODE, tp->mac_mode);
4120                 udelay(40);
4121         }
4122
4123 out:
4124         return current_link_up;
4125 }
4126
4127 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4128 {
4129         u32 orig_pause_cfg;
4130         u16 orig_active_speed;
4131         u8 orig_active_duplex;
4132         u32 mac_status;
4133         int current_link_up;
4134         int i;
4135
4136         orig_pause_cfg = tp->link_config.active_flowctrl;
4137         orig_active_speed = tp->link_config.active_speed;
4138         orig_active_duplex = tp->link_config.active_duplex;
4139
4140         if (!tg3_flag(tp, HW_AUTONEG) &&
4141             netif_carrier_ok(tp->dev) &&
4142             tg3_flag(tp, INIT_COMPLETE)) {
4143                 mac_status = tr32(MAC_STATUS);
4144                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4145                                MAC_STATUS_SIGNAL_DET |
4146                                MAC_STATUS_CFG_CHANGED |
4147                                MAC_STATUS_RCVD_CFG);
4148                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4149                                    MAC_STATUS_SIGNAL_DET)) {
4150                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4151                                             MAC_STATUS_CFG_CHANGED));
4152                         return 0;
4153                 }
4154         }
4155
4156         tw32_f(MAC_TX_AUTO_NEG, 0);
4157
4158         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4159         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4160         tw32_f(MAC_MODE, tp->mac_mode);
4161         udelay(40);
4162
4163         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4164                 tg3_init_bcm8002(tp);
4165
4166         /* Enable link change event even when serdes polling.  */
4167         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4168         udelay(40);
4169
4170         current_link_up = 0;
4171         mac_status = tr32(MAC_STATUS);
4172
4173         if (tg3_flag(tp, HW_AUTONEG))
4174                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4175         else
4176                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4177
4178         tp->napi[0].hw_status->status =
4179                 (SD_STATUS_UPDATED |
4180                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4181
4182         for (i = 0; i < 100; i++) {
4183                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4184                                     MAC_STATUS_CFG_CHANGED));
4185                 udelay(5);
4186                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4187                                          MAC_STATUS_CFG_CHANGED |
4188                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4189                         break;
4190         }
4191
4192         mac_status = tr32(MAC_STATUS);
4193         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4194                 current_link_up = 0;
4195                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4196                     tp->serdes_counter == 0) {
4197                         tw32_f(MAC_MODE, (tp->mac_mode |
4198                                           MAC_MODE_SEND_CONFIGS));
4199                         udelay(1);
4200                         tw32_f(MAC_MODE, tp->mac_mode);
4201                 }
4202         }
4203
4204         if (current_link_up == 1) {
4205                 tp->link_config.active_speed = SPEED_1000;
4206                 tp->link_config.active_duplex = DUPLEX_FULL;
4207                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4208                                     LED_CTRL_LNKLED_OVERRIDE |
4209                                     LED_CTRL_1000MBPS_ON));
4210         } else {
4211                 tp->link_config.active_speed = SPEED_INVALID;
4212                 tp->link_config.active_duplex = DUPLEX_INVALID;
4213                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4214                                     LED_CTRL_LNKLED_OVERRIDE |
4215                                     LED_CTRL_TRAFFIC_OVERRIDE));
4216         }
4217
4218         if (current_link_up != netif_carrier_ok(tp->dev)) {
4219                 if (current_link_up)
4220                         netif_carrier_on(tp->dev);
4221                 else
4222                         netif_carrier_off(tp->dev);
4223                 tg3_link_report(tp);
4224         } else {
4225                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4226                 if (orig_pause_cfg != now_pause_cfg ||
4227                     orig_active_speed != tp->link_config.active_speed ||
4228                     orig_active_duplex != tp->link_config.active_duplex)
4229                         tg3_link_report(tp);
4230         }
4231
4232         return 0;
4233 }
4234
4235 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4236 {
4237         int current_link_up, err = 0;
4238         u32 bmsr, bmcr;
4239         u16 current_speed;
4240         u8 current_duplex;
4241         u32 local_adv, remote_adv;
4242
4243         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4244         tw32_f(MAC_MODE, tp->mac_mode);
4245         udelay(40);
4246
4247         tw32(MAC_EVENT, 0);
4248
4249         tw32_f(MAC_STATUS,
4250              (MAC_STATUS_SYNC_CHANGED |
4251               MAC_STATUS_CFG_CHANGED |
4252               MAC_STATUS_MI_COMPLETION |
4253               MAC_STATUS_LNKSTATE_CHANGED));
4254         udelay(40);
4255
4256         if (force_reset)
4257                 tg3_phy_reset(tp);
4258
4259         current_link_up = 0;
4260         current_speed = SPEED_INVALID;
4261         current_duplex = DUPLEX_INVALID;
4262
4263         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4264         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4265         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4266                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4267                         bmsr |= BMSR_LSTATUS;
4268                 else
4269                         bmsr &= ~BMSR_LSTATUS;
4270         }
4271
4272         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4273
4274         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4275             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4276                 /* do nothing, just check for link up at the end */
4277         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4278                 u32 adv, new_adv;
4279
4280                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4281                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4282                                   ADVERTISE_1000XPAUSE |
4283                                   ADVERTISE_1000XPSE_ASYM |
4284                                   ADVERTISE_SLCT);
4285
4286                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4287
4288                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4289                         new_adv |= ADVERTISE_1000XHALF;
4290                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4291                         new_adv |= ADVERTISE_1000XFULL;
4292
4293                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4294                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4295                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4296                         tg3_writephy(tp, MII_BMCR, bmcr);
4297
4298                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4299                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4300                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4301
4302                         return err;
4303                 }
4304         } else {
4305                 u32 new_bmcr;
4306
4307                 bmcr &= ~BMCR_SPEED1000;
4308                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4309
4310                 if (tp->link_config.duplex == DUPLEX_FULL)
4311                         new_bmcr |= BMCR_FULLDPLX;
4312
4313                 if (new_bmcr != bmcr) {
4314                         /* BMCR_SPEED1000 is a reserved bit that needs
4315                          * to be set on write.
4316                          */
4317                         new_bmcr |= BMCR_SPEED1000;
4318
4319                         /* Force a linkdown */
4320                         if (netif_carrier_ok(tp->dev)) {
4321                                 u32 adv;
4322
4323                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4324                                 adv &= ~(ADVERTISE_1000XFULL |
4325                                          ADVERTISE_1000XHALF |
4326                                          ADVERTISE_SLCT);
4327                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4328                                 tg3_writephy(tp, MII_BMCR, bmcr |
4329                                                            BMCR_ANRESTART |
4330                                                            BMCR_ANENABLE);
4331                                 udelay(10);
4332                                 netif_carrier_off(tp->dev);
4333                         }
4334                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4335                         bmcr = new_bmcr;
4336                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4337                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4338                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4339                             ASIC_REV_5714) {
4340                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4341                                         bmsr |= BMSR_LSTATUS;
4342                                 else
4343                                         bmsr &= ~BMSR_LSTATUS;
4344                         }
4345                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4346                 }
4347         }
4348
4349         if (bmsr & BMSR_LSTATUS) {
4350                 current_speed = SPEED_1000;
4351                 current_link_up = 1;
4352                 if (bmcr & BMCR_FULLDPLX)
4353                         current_duplex = DUPLEX_FULL;
4354                 else
4355                         current_duplex = DUPLEX_HALF;
4356
4357                 local_adv = 0;
4358                 remote_adv = 0;
4359
4360                 if (bmcr & BMCR_ANENABLE) {
4361                         u32 common;
4362
4363                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4364                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4365                         common = local_adv & remote_adv;
4366                         if (common & (ADVERTISE_1000XHALF |
4367                                       ADVERTISE_1000XFULL)) {
4368                                 if (common & ADVERTISE_1000XFULL)
4369                                         current_duplex = DUPLEX_FULL;
4370                                 else
4371                                         current_duplex = DUPLEX_HALF;
4372                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4373                                 /* Link is up via parallel detect */
4374                         } else {
4375                                 current_link_up = 0;
4376                         }
4377                 }
4378         }
4379
4380         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4381                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4382
4383         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4384         if (tp->link_config.active_duplex == DUPLEX_HALF)
4385                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4386
4387         tw32_f(MAC_MODE, tp->mac_mode);
4388         udelay(40);
4389
4390         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4391
4392         tp->link_config.active_speed = current_speed;
4393         tp->link_config.active_duplex = current_duplex;
4394
4395         if (current_link_up != netif_carrier_ok(tp->dev)) {
4396                 if (current_link_up)
4397                         netif_carrier_on(tp->dev);
4398                 else {
4399                         netif_carrier_off(tp->dev);
4400                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4401                 }
4402                 tg3_link_report(tp);
4403         }
4404         return err;
4405 }
4406
4407 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4408 {
4409         if (tp->serdes_counter) {
4410                 /* Give autoneg time to complete. */
4411                 tp->serdes_counter--;
4412                 return;
4413         }
4414
4415         if (!netif_carrier_ok(tp->dev) &&
4416             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4417                 u32 bmcr;
4418
4419                 tg3_readphy(tp, MII_BMCR, &bmcr);
4420                 if (bmcr & BMCR_ANENABLE) {
4421                         u32 phy1, phy2;
4422
4423                         /* Select shadow register 0x1f */
4424                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4425                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4426
4427                         /* Select expansion interrupt status register */
4428                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4429                                          MII_TG3_DSP_EXP1_INT_STAT);
4430                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4431                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4432
4433                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4434                                 /* We have signal detect and not receiving
4435                                  * config code words, link is up by parallel
4436                                  * detection.
4437                                  */
4438
4439                                 bmcr &= ~BMCR_ANENABLE;
4440                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4441                                 tg3_writephy(tp, MII_BMCR, bmcr);
4442                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4443                         }
4444                 }
4445         } else if (netif_carrier_ok(tp->dev) &&
4446                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4447                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4448                 u32 phy2;
4449
4450                 /* Select expansion interrupt status register */
4451                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4452                                  MII_TG3_DSP_EXP1_INT_STAT);
4453                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4454                 if (phy2 & 0x20) {
4455                         u32 bmcr;
4456
4457                         /* Config code words received, turn on autoneg. */
4458                         tg3_readphy(tp, MII_BMCR, &bmcr);
4459                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4460
4461                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4462
4463                 }
4464         }
4465 }
4466
4467 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4468 {
4469         u32 val;
4470         int err;
4471
4472         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4473                 err = tg3_setup_fiber_phy(tp, force_reset);
4474         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4475                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4476         else
4477                 err = tg3_setup_copper_phy(tp, force_reset);
4478
4479         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4480                 u32 scale;
4481
4482                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4483                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4484                         scale = 65;
4485                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4486                         scale = 6;
4487                 else
4488                         scale = 12;
4489
4490                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4491                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4492                 tw32(GRC_MISC_CFG, val);
4493         }
4494
4495         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4496               (6 << TX_LENGTHS_IPG_SHIFT);
4497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4498                 val |= tr32(MAC_TX_LENGTHS) &
4499                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4500                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4501
4502         if (tp->link_config.active_speed == SPEED_1000 &&
4503             tp->link_config.active_duplex == DUPLEX_HALF)
4504                 tw32(MAC_TX_LENGTHS, val |
4505                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4506         else
4507                 tw32(MAC_TX_LENGTHS, val |
4508                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4509
4510         if (!tg3_flag(tp, 5705_PLUS)) {
4511                 if (netif_carrier_ok(tp->dev)) {
4512                         tw32(HOSTCC_STAT_COAL_TICKS,
4513                              tp->coal.stats_block_coalesce_usecs);
4514                 } else {
4515                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4516                 }
4517         }
4518
4519         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4520                 val = tr32(PCIE_PWR_MGMT_THRESH);
4521                 if (!netif_carrier_ok(tp->dev))
4522                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4523                               tp->pwrmgmt_thresh;
4524                 else
4525                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4526                 tw32(PCIE_PWR_MGMT_THRESH, val);
4527         }
4528
4529         return err;
4530 }
4531
4532 static inline int tg3_irq_sync(struct tg3 *tp)
4533 {
4534         return tp->irq_sync;
4535 }
4536
4537 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4538 {
4539         int i;
4540
4541         dst = (u32 *)((u8 *)dst + off);
4542         for (i = 0; i < len; i += sizeof(u32))
4543                 *dst++ = tr32(off + i);
4544 }
4545
4546 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4547 {
4548         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4549         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4550         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4551         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4552         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4553         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4554         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4555         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4556         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4557         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4558         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4559         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4560         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4561         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4562         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4563         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4564         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4565         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4566         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4567
4568         if (tg3_flag(tp, SUPPORT_MSIX))
4569                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4570
4571         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4572         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4573         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4574         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4575         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4576         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4577         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4578         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4579
4580         if (!tg3_flag(tp, 5705_PLUS)) {
4581                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4582                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4583                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4584         }
4585
4586         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4587         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4588         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4589         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4590         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4591
4592         if (tg3_flag(tp, NVRAM))
4593                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4594 }
4595
4596 static void tg3_dump_state(struct tg3 *tp)
4597 {
4598         int i;
4599         u32 *regs;
4600
4601         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4602         if (!regs) {
4603                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4604                 return;
4605         }
4606
4607         if (tg3_flag(tp, PCI_EXPRESS)) {
4608                 /* Read up to but not including private PCI registers */
4609                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4610                         regs[i / sizeof(u32)] = tr32(i);
4611         } else
4612                 tg3_dump_legacy_regs(tp, regs);
4613
4614         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4615                 if (!regs[i + 0] && !regs[i + 1] &&
4616                     !regs[i + 2] && !regs[i + 3])
4617                         continue;
4618
4619                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4620                            i * 4,
4621                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4622         }
4623
4624         kfree(regs);
4625
4626         for (i = 0; i < tp->irq_cnt; i++) {
4627                 struct tg3_napi *tnapi = &tp->napi[i];
4628
4629                 /* SW status block */
4630                 netdev_err(tp->dev,
4631                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4632                            i,
4633                            tnapi->hw_status->status,
4634                            tnapi->hw_status->status_tag,
4635                            tnapi->hw_status->rx_jumbo_consumer,
4636                            tnapi->hw_status->rx_consumer,
4637                            tnapi->hw_status->rx_mini_consumer,
4638                            tnapi->hw_status->idx[0].rx_producer,
4639                            tnapi->hw_status->idx[0].tx_consumer);
4640
4641                 netdev_err(tp->dev,
4642                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4643                            i,
4644                            tnapi->last_tag, tnapi->last_irq_tag,
4645                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4646                            tnapi->rx_rcb_ptr,
4647                            tnapi->prodring.rx_std_prod_idx,
4648                            tnapi->prodring.rx_std_cons_idx,
4649                            tnapi->prodring.rx_jmb_prod_idx,
4650                            tnapi->prodring.rx_jmb_cons_idx);
4651         }
4652 }
4653
4654 /* This is called whenever we suspect that the system chipset is re-
4655  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4656  * is bogus tx completions. We try to recover by setting the
4657  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4658  * in the workqueue.
4659  */
4660 static void tg3_tx_recover(struct tg3 *tp)
4661 {
4662         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4663                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4664
4665         netdev_warn(tp->dev,
4666                     "The system may be re-ordering memory-mapped I/O "
4667                     "cycles to the network device, attempting to recover. "
4668                     "Please report the problem to the driver maintainer "
4669                     "and include system chipset information.\n");
4670
4671         spin_lock(&tp->lock);
4672         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4673         spin_unlock(&tp->lock);
4674 }
4675
4676 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4677 {
4678         /* Tell compiler to fetch tx indices from memory. */
4679         barrier();
4680         return tnapi->tx_pending -
4681                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4682 }
4683
4684 /* Tigon3 never reports partial packet sends.  So we do not
4685  * need special logic to handle SKBs that have not had all
4686  * of their frags sent yet, like SunGEM does.
4687  */
4688 static void tg3_tx(struct tg3_napi *tnapi)
4689 {
4690         struct tg3 *tp = tnapi->tp;
4691         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4692         u32 sw_idx = tnapi->tx_cons;
4693         struct netdev_queue *txq;
4694         int index = tnapi - tp->napi;
4695
4696         if (tg3_flag(tp, ENABLE_TSS))
4697                 index--;
4698
4699         txq = netdev_get_tx_queue(tp->dev, index);
4700
4701         while (sw_idx != hw_idx) {
4702                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4703                 struct sk_buff *skb = ri->skb;
4704                 int i, tx_bug = 0;
4705
4706                 if (unlikely(skb == NULL)) {
4707                         tg3_tx_recover(tp);
4708                         return;
4709                 }
4710
4711                 pci_unmap_single(tp->pdev,
4712                                  dma_unmap_addr(ri, mapping),
4713                                  skb_headlen(skb),
4714                                  PCI_DMA_TODEVICE);
4715
4716                 ri->skb = NULL;
4717
4718                 sw_idx = NEXT_TX(sw_idx);
4719
4720                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4721                         ri = &tnapi->tx_buffers[sw_idx];
4722                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4723                                 tx_bug = 1;
4724
4725                         pci_unmap_page(tp->pdev,
4726                                        dma_unmap_addr(ri, mapping),
4727                                        skb_shinfo(skb)->frags[i].size,
4728                                        PCI_DMA_TODEVICE);
4729                         sw_idx = NEXT_TX(sw_idx);
4730                 }
4731
4732                 dev_kfree_skb(skb);
4733
4734                 if (unlikely(tx_bug)) {
4735                         tg3_tx_recover(tp);
4736                         return;
4737                 }
4738         }
4739
4740         tnapi->tx_cons = sw_idx;
4741
4742         /* Need to make the tx_cons update visible to tg3_start_xmit()
4743          * before checking for netif_queue_stopped().  Without the
4744          * memory barrier, there is a small possibility that tg3_start_xmit()
4745          * will miss it and cause the queue to be stopped forever.
4746          */
4747         smp_mb();
4748
4749         if (unlikely(netif_tx_queue_stopped(txq) &&
4750                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4751                 __netif_tx_lock(txq, smp_processor_id());
4752                 if (netif_tx_queue_stopped(txq) &&
4753                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4754                         netif_tx_wake_queue(txq);
4755                 __netif_tx_unlock(txq);
4756         }
4757 }
4758
4759 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4760 {
4761         if (!ri->skb)
4762                 return;
4763
4764         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4765                          map_sz, PCI_DMA_FROMDEVICE);
4766         dev_kfree_skb_any(ri->skb);
4767         ri->skb = NULL;
4768 }
4769
4770 /* Returns size of skb allocated or < 0 on error.
4771  *
4772  * We only need to fill in the address because the other members
4773  * of the RX descriptor are invariant, see tg3_init_rings.
4774  *
4775  * Note the purposeful assymetry of cpu vs. chip accesses.  For
4776  * posting buffers we only dirty the first cache line of the RX
4777  * descriptor (containing the address).  Whereas for the RX status
4778  * buffers the cpu only reads the last cacheline of the RX descriptor
4779  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4780  */
4781 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4782                             u32 opaque_key, u32 dest_idx_unmasked)
4783 {
4784         struct tg3_rx_buffer_desc *desc;
4785         struct ring_info *map;
4786         struct sk_buff *skb;
4787         dma_addr_t mapping;
4788         int skb_size, dest_idx;
4789
4790         switch (opaque_key) {
4791         case RXD_OPAQUE_RING_STD:
4792                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4793                 desc = &tpr->rx_std[dest_idx];
4794                 map = &tpr->rx_std_buffers[dest_idx];
4795                 skb_size = tp->rx_pkt_map_sz;
4796                 break;
4797
4798         case RXD_OPAQUE_RING_JUMBO:
4799                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4800                 desc = &tpr->rx_jmb[dest_idx].std;
4801                 map = &tpr->rx_jmb_buffers[dest_idx];
4802                 skb_size = TG3_RX_JMB_MAP_SZ;
4803                 break;
4804
4805         default:
4806                 return -EINVAL;
4807         }
4808
4809         /* Do not overwrite any of the map or rp information
4810          * until we are sure we can commit to a new buffer.
4811          *
4812          * Callers depend upon this behavior and assume that
4813          * we leave everything unchanged if we fail.
4814          */
4815         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4816         if (skb == NULL)
4817                 return -ENOMEM;
4818
4819         skb_reserve(skb, tp->rx_offset);
4820
4821         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4822                                  PCI_DMA_FROMDEVICE);
4823         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4824                 dev_kfree_skb(skb);
4825                 return -EIO;
4826         }
4827
4828         map->skb = skb;
4829         dma_unmap_addr_set(map, mapping, mapping);
4830
4831         desc->addr_hi = ((u64)mapping >> 32);
4832         desc->addr_lo = ((u64)mapping & 0xffffffff);
4833
4834         return skb_size;
4835 }
4836
4837 /* We only need to move over in the address because the other
4838  * members of the RX descriptor are invariant.  See notes above
4839  * tg3_alloc_rx_skb for full details.
4840  */
4841 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4842                            struct tg3_rx_prodring_set *dpr,
4843                            u32 opaque_key, int src_idx,
4844                            u32 dest_idx_unmasked)
4845 {
4846         struct tg3 *tp = tnapi->tp;
4847         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4848         struct ring_info *src_map, *dest_map;
4849         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4850         int dest_idx;
4851
4852         switch (opaque_key) {
4853         case RXD_OPAQUE_RING_STD:
4854                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4855                 dest_desc = &dpr->rx_std[dest_idx];
4856                 dest_map = &dpr->rx_std_buffers[dest_idx];
4857                 src_desc = &spr->rx_std[src_idx];
4858                 src_map = &spr->rx_std_buffers[src_idx];
4859                 break;
4860
4861         case RXD_OPAQUE_RING_JUMBO:
4862                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4863                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4864                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4865                 src_desc = &spr->rx_jmb[src_idx].std;
4866                 src_map = &spr->rx_jmb_buffers[src_idx];
4867                 break;
4868
4869         default:
4870                 return;
4871         }
4872
4873         dest_map->skb = src_map->skb;
4874         dma_unmap_addr_set(dest_map, mapping,
4875                            dma_unmap_addr(src_map, mapping));
4876         dest_desc->addr_hi = src_desc->addr_hi;
4877         dest_desc->addr_lo = src_desc->addr_lo;
4878
4879         /* Ensure that the update to the skb happens after the physical
4880          * addresses have been transferred to the new BD location.
4881          */
4882         smp_wmb();
4883
4884         src_map->skb = NULL;
4885 }
4886
4887 /* The RX ring scheme is composed of multiple rings which post fresh
4888  * buffers to the chip, and one special ring the chip uses to report
4889  * status back to the host.
4890  *
4891  * The special ring reports the status of received packets to the
4892  * host.  The chip does not write into the original descriptor the
4893  * RX buffer was obtained from.  The chip simply takes the original
4894  * descriptor as provided by the host, updates the status and length
4895  * field, then writes this into the next status ring entry.
4896  *
4897  * Each ring the host uses to post buffers to the chip is described
4898  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4899  * it is first placed into the on-chip ram.  When the packet's length
4900  * is known, it walks down the TG3_BDINFO entries to select the ring.
4901  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4902  * which is within the range of the new packet's length is chosen.
4903  *
4904  * The "separate ring for rx status" scheme may sound queer, but it makes
4905  * sense from a cache coherency perspective.  If only the host writes
4906  * to the buffer post rings, and only the chip writes to the rx status
4907  * rings, then cache lines never move beyond shared-modified state.
4908  * If both the host and chip were to write into the same ring, cache line
4909  * eviction could occur since both entities want it in an exclusive state.
4910  */
4911 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4912 {
4913         struct tg3 *tp = tnapi->tp;
4914         u32 work_mask, rx_std_posted = 0;
4915         u32 std_prod_idx, jmb_prod_idx;
4916         u32 sw_idx = tnapi->rx_rcb_ptr;
4917         u16 hw_idx;
4918         int received;
4919         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4920
4921         hw_idx = *(tnapi->rx_rcb_prod_idx);
4922         /*
4923          * We need to order the read of hw_idx and the read of
4924          * the opaque cookie.
4925          */
4926         rmb();
4927         work_mask = 0;
4928         received = 0;
4929         std_prod_idx = tpr->rx_std_prod_idx;
4930         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4931         while (sw_idx != hw_idx && budget > 0) {
4932                 struct ring_info *ri;
4933                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4934                 unsigned int len;
4935                 struct sk_buff *skb;
4936                 dma_addr_t dma_addr;
4937                 u32 opaque_key, desc_idx, *post_ptr;
4938
4939                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4940                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4941                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4942                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4943                         dma_addr = dma_unmap_addr(ri, mapping);
4944                         skb = ri->skb;
4945                         post_ptr = &std_prod_idx;
4946                         rx_std_posted++;
4947                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4948                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4949                         dma_addr = dma_unmap_addr(ri, mapping);
4950                         skb = ri->skb;
4951                         post_ptr = &jmb_prod_idx;
4952                 } else
4953                         goto next_pkt_nopost;
4954
4955                 work_mask |= opaque_key;
4956
4957                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4958                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4959                 drop_it:
4960                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4961                                        desc_idx, *post_ptr);
4962                 drop_it_no_recycle:
4963                         /* Other statistics kept track of by card. */
4964                         tp->rx_dropped++;
4965                         goto next_pkt;
4966                 }
4967
4968                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4969                       ETH_FCS_LEN;
4970
4971                 if (len > TG3_RX_COPY_THRESH(tp)) {
4972                         int skb_size;
4973
4974                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4975                                                     *post_ptr);
4976                         if (skb_size < 0)
4977                                 goto drop_it;
4978
4979                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4980                                          PCI_DMA_FROMDEVICE);
4981
4982                         /* Ensure that the update to the skb happens
4983                          * after the usage of the old DMA mapping.
4984                          */
4985                         smp_wmb();
4986
4987                         ri->skb = NULL;
4988
4989                         skb_put(skb, len);
4990                 } else {
4991                         struct sk_buff *copy_skb;
4992
4993                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4994                                        desc_idx, *post_ptr);
4995
4996                         copy_skb = netdev_alloc_skb(tp->dev, len +
4997                                                     TG3_RAW_IP_ALIGN);
4998                         if (copy_skb == NULL)
4999                                 goto drop_it_no_recycle;
5000
5001                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5002                         skb_put(copy_skb, len);
5003                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5004                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5005                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5006
5007                         /* We'll reuse the original ring buffer. */
5008                         skb = copy_skb;
5009                 }
5010
5011                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5012                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5013                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5014                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5015                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5016                 else
5017                         skb_checksum_none_assert(skb);
5018
5019                 skb->protocol = eth_type_trans(skb, tp->dev);
5020
5021                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5022                     skb->protocol != htons(ETH_P_8021Q)) {
5023                         dev_kfree_skb(skb);
5024                         goto drop_it_no_recycle;
5025                 }
5026
5027                 if (desc->type_flags & RXD_FLAG_VLAN &&
5028                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5029                         __vlan_hwaccel_put_tag(skb,
5030                                                desc->err_vlan & RXD_VLAN_MASK);
5031
5032                 napi_gro_receive(&tnapi->napi, skb);
5033
5034                 received++;
5035                 budget--;
5036
5037 next_pkt:
5038                 (*post_ptr)++;
5039
5040                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5041                         tpr->rx_std_prod_idx = std_prod_idx &
5042                                                tp->rx_std_ring_mask;
5043                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5044                                      tpr->rx_std_prod_idx);
5045                         work_mask &= ~RXD_OPAQUE_RING_STD;
5046                         rx_std_posted = 0;
5047                 }
5048 next_pkt_nopost:
5049                 sw_idx++;
5050                 sw_idx &= tp->rx_ret_ring_mask;
5051
5052                 /* Refresh hw_idx to see if there is new work */
5053                 if (sw_idx == hw_idx) {
5054                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5055                         rmb();
5056                 }
5057         }
5058
5059         /* ACK the status ring. */
5060         tnapi->rx_rcb_ptr = sw_idx;
5061         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5062
5063         /* Refill RX ring(s). */
5064         if (!tg3_flag(tp, ENABLE_RSS)) {
5065                 if (work_mask & RXD_OPAQUE_RING_STD) {
5066                         tpr->rx_std_prod_idx = std_prod_idx &
5067                                                tp->rx_std_ring_mask;
5068                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5069                                      tpr->rx_std_prod_idx);
5070                 }
5071                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5072                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5073                                                tp->rx_jmb_ring_mask;
5074                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5075                                      tpr->rx_jmb_prod_idx);
5076                 }
5077                 mmiowb();
5078         } else if (work_mask) {
5079                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5080                  * updated before the producer indices can be updated.
5081                  */
5082                 smp_wmb();
5083
5084                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5085                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5086
5087                 if (tnapi != &tp->napi[1])
5088                         napi_schedule(&tp->napi[1].napi);
5089         }
5090
5091         return received;
5092 }
5093
5094 static void tg3_poll_link(struct tg3 *tp)
5095 {
5096         /* handle link change and other phy events */
5097         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5098                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5099
5100                 if (sblk->status & SD_STATUS_LINK_CHG) {
5101                         sblk->status = SD_STATUS_UPDATED |
5102                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5103                         spin_lock(&tp->lock);
5104                         if (tg3_flag(tp, USE_PHYLIB)) {
5105                                 tw32_f(MAC_STATUS,
5106                                      (MAC_STATUS_SYNC_CHANGED |
5107                                       MAC_STATUS_CFG_CHANGED |
5108                                       MAC_STATUS_MI_COMPLETION |
5109                                       MAC_STATUS_LNKSTATE_CHANGED));
5110                                 udelay(40);
5111                         } else
5112                                 tg3_setup_phy(tp, 0);
5113                         spin_unlock(&tp->lock);
5114                 }
5115         }
5116 }
5117
5118 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5119                                 struct tg3_rx_prodring_set *dpr,
5120                                 struct tg3_rx_prodring_set *spr)
5121 {
5122         u32 si, di, cpycnt, src_prod_idx;
5123         int i, err = 0;
5124
5125         while (1) {
5126                 src_prod_idx = spr->rx_std_prod_idx;
5127
5128                 /* Make sure updates to the rx_std_buffers[] entries and the
5129                  * standard producer index are seen in the correct order.
5130                  */
5131                 smp_rmb();
5132
5133                 if (spr->rx_std_cons_idx == src_prod_idx)
5134                         break;
5135
5136                 if (spr->rx_std_cons_idx < src_prod_idx)
5137                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5138                 else
5139                         cpycnt = tp->rx_std_ring_mask + 1 -
5140                                  spr->rx_std_cons_idx;
5141
5142                 cpycnt = min(cpycnt,
5143                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5144
5145                 si = spr->rx_std_cons_idx;
5146                 di = dpr->rx_std_prod_idx;
5147
5148                 for (i = di; i < di + cpycnt; i++) {
5149                         if (dpr->rx_std_buffers[i].skb) {
5150                                 cpycnt = i - di;
5151                                 err = -ENOSPC;
5152                                 break;
5153                         }
5154                 }
5155
5156                 if (!cpycnt)
5157                         break;
5158
5159                 /* Ensure that updates to the rx_std_buffers ring and the
5160                  * shadowed hardware producer ring from tg3_recycle_skb() are
5161                  * ordered correctly WRT the skb check above.
5162                  */
5163                 smp_rmb();
5164
5165                 memcpy(&dpr->rx_std_buffers[di],
5166                        &spr->rx_std_buffers[si],
5167                        cpycnt * sizeof(struct ring_info));
5168
5169                 for (i = 0; i < cpycnt; i++, di++, si++) {
5170                         struct tg3_rx_buffer_desc *sbd, *dbd;
5171                         sbd = &spr->rx_std[si];
5172                         dbd = &dpr->rx_std[di];
5173                         dbd->addr_hi = sbd->addr_hi;
5174                         dbd->addr_lo = sbd->addr_lo;
5175                 }
5176
5177                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5178                                        tp->rx_std_ring_mask;
5179                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5180                                        tp->rx_std_ring_mask;
5181         }
5182
5183         while (1) {
5184                 src_prod_idx = spr->rx_jmb_prod_idx;
5185
5186                 /* Make sure updates to the rx_jmb_buffers[] entries and
5187                  * the jumbo producer index are seen in the correct order.
5188                  */
5189                 smp_rmb();
5190
5191                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5192                         break;
5193
5194                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5195                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5196                 else
5197                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5198                                  spr->rx_jmb_cons_idx;
5199
5200                 cpycnt = min(cpycnt,
5201                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5202
5203                 si = spr->rx_jmb_cons_idx;
5204                 di = dpr->rx_jmb_prod_idx;
5205
5206                 for (i = di; i < di + cpycnt; i++) {
5207                         if (dpr->rx_jmb_buffers[i].skb) {
5208                                 cpycnt = i - di;
5209                                 err = -ENOSPC;
5210                                 break;
5211                         }
5212                 }
5213
5214                 if (!cpycnt)
5215                         break;
5216
5217                 /* Ensure that updates to the rx_jmb_buffers ring and the
5218                  * shadowed hardware producer ring from tg3_recycle_skb() are
5219                  * ordered correctly WRT the skb check above.
5220                  */
5221                 smp_rmb();
5222
5223                 memcpy(&dpr->rx_jmb_buffers[di],
5224                        &spr->rx_jmb_buffers[si],
5225                        cpycnt * sizeof(struct ring_info));
5226
5227                 for (i = 0; i < cpycnt; i++, di++, si++) {
5228                         struct tg3_rx_buffer_desc *sbd, *dbd;
5229                         sbd = &spr->rx_jmb[si].std;
5230                         dbd = &dpr->rx_jmb[di].std;
5231                         dbd->addr_hi = sbd->addr_hi;
5232                         dbd->addr_lo = sbd->addr_lo;
5233                 }
5234
5235                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5236                                        tp->rx_jmb_ring_mask;
5237                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5238                                        tp->rx_jmb_ring_mask;
5239         }
5240
5241         return err;
5242 }
5243
5244 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5245 {
5246         struct tg3 *tp = tnapi->tp;
5247
5248         /* run TX completion thread */
5249         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5250                 tg3_tx(tnapi);
5251                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5252                         return work_done;
5253         }
5254
5255         /* run RX thread, within the bounds set by NAPI.
5256          * All RX "locking" is done by ensuring outside
5257          * code synchronizes with tg3->napi.poll()
5258          */
5259         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5260                 work_done += tg3_rx(tnapi, budget - work_done);
5261
5262         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5263                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5264                 int i, err = 0;
5265                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5266                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5267
5268                 for (i = 1; i < tp->irq_cnt; i++)
5269                         err |= tg3_rx_prodring_xfer(tp, dpr,
5270                                                     &tp->napi[i].prodring);
5271
5272                 wmb();
5273
5274                 if (std_prod_idx != dpr->rx_std_prod_idx)
5275                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5276                                      dpr->rx_std_prod_idx);
5277
5278                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5279                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5280                                      dpr->rx_jmb_prod_idx);
5281
5282                 mmiowb();
5283
5284                 if (err)
5285                         tw32_f(HOSTCC_MODE, tp->coal_now);
5286         }
5287
5288         return work_done;
5289 }
5290
5291 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5292 {
5293         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5294         struct tg3 *tp = tnapi->tp;
5295         int work_done = 0;
5296         struct tg3_hw_status *sblk = tnapi->hw_status;
5297
5298         while (1) {
5299                 work_done = tg3_poll_work(tnapi, work_done, budget);
5300
5301                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5302                         goto tx_recovery;
5303
5304                 if (unlikely(work_done >= budget))
5305                         break;
5306
5307                 /* tp->last_tag is used in tg3_int_reenable() below
5308                  * to tell the hw how much work has been processed,
5309                  * so we must read it before checking for more work.
5310                  */
5311                 tnapi->last_tag = sblk->status_tag;
5312                 tnapi->last_irq_tag = tnapi->last_tag;
5313                 rmb();
5314
5315                 /* check for RX/TX work to do */
5316                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5317                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5318                         napi_complete(napi);
5319                         /* Reenable interrupts. */
5320                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5321                         mmiowb();
5322                         break;
5323                 }
5324         }
5325
5326         return work_done;
5327
5328 tx_recovery:
5329         /* work_done is guaranteed to be less than budget. */
5330         napi_complete(napi);
5331         schedule_work(&tp->reset_task);
5332         return work_done;
5333 }
5334
5335 static void tg3_process_error(struct tg3 *tp)
5336 {
5337         u32 val;
5338         bool real_error = false;
5339
5340         if (tg3_flag(tp, ERROR_PROCESSED))
5341                 return;
5342
5343         /* Check Flow Attention register */
5344         val = tr32(HOSTCC_FLOW_ATTN);
5345         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5346                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5347                 real_error = true;
5348         }
5349
5350         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5351                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5352                 real_error = true;
5353         }
5354
5355         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5356                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5357                 real_error = true;
5358         }
5359
5360         if (!real_error)
5361                 return;
5362
5363         tg3_dump_state(tp);
5364
5365         tg3_flag_set(tp, ERROR_PROCESSED);
5366         schedule_work(&tp->reset_task);
5367 }
5368
5369 static int tg3_poll(struct napi_struct *napi, int budget)
5370 {
5371         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5372         struct tg3 *tp = tnapi->tp;
5373         int work_done = 0;
5374         struct tg3_hw_status *sblk = tnapi->hw_status;
5375
5376         while (1) {
5377                 if (sblk->status & SD_STATUS_ERROR)
5378                         tg3_process_error(tp);
5379
5380                 tg3_poll_link(tp);
5381
5382                 work_done = tg3_poll_work(tnapi, work_done, budget);
5383
5384                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5385                         goto tx_recovery;
5386
5387                 if (unlikely(work_done >= budget))
5388                         break;
5389
5390                 if (tg3_flag(tp, TAGGED_STATUS)) {
5391                         /* tp->last_tag is used in tg3_int_reenable() below
5392                          * to tell the hw how much work has been processed,
5393                          * so we must read it before checking for more work.
5394                          */
5395                         tnapi->last_tag = sblk->status_tag;
5396                         tnapi->last_irq_tag = tnapi->last_tag;
5397                         rmb();
5398                 } else
5399                         sblk->status &= ~SD_STATUS_UPDATED;
5400
5401                 if (likely(!tg3_has_work(tnapi))) {
5402                         napi_complete(napi);
5403                         tg3_int_reenable(tnapi);
5404                         break;
5405                 }
5406         }
5407
5408         return work_done;
5409
5410 tx_recovery:
5411         /* work_done is guaranteed to be less than budget. */
5412         napi_complete(napi);
5413         schedule_work(&tp->reset_task);
5414         return work_done;
5415 }
5416
5417 static void tg3_napi_disable(struct tg3 *tp)
5418 {
5419         int i;
5420
5421         for (i = tp->irq_cnt - 1; i >= 0; i--)
5422                 napi_disable(&tp->napi[i].napi);
5423 }
5424
5425 static void tg3_napi_enable(struct tg3 *tp)
5426 {
5427         int i;
5428
5429         for (i = 0; i < tp->irq_cnt; i++)
5430                 napi_enable(&tp->napi[i].napi);
5431 }
5432
5433 static void tg3_napi_init(struct tg3 *tp)
5434 {
5435         int i;
5436
5437         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5438         for (i = 1; i < tp->irq_cnt; i++)
5439                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5440 }
5441
5442 static void tg3_napi_fini(struct tg3 *tp)
5443 {
5444         int i;
5445
5446         for (i = 0; i < tp->irq_cnt; i++)
5447                 netif_napi_del(&tp->napi[i].napi);
5448 }
5449
5450 static inline void tg3_netif_stop(struct tg3 *tp)
5451 {
5452         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5453         tg3_napi_disable(tp);
5454         netif_tx_disable(tp->dev);
5455 }
5456
5457 static inline void tg3_netif_start(struct tg3 *tp)
5458 {
5459         /* NOTE: unconditional netif_tx_wake_all_queues is only
5460          * appropriate so long as all callers are assured to
5461          * have free tx slots (such as after tg3_init_hw)
5462          */
5463         netif_tx_wake_all_queues(tp->dev);
5464
5465         tg3_napi_enable(tp);
5466         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5467         tg3_enable_ints(tp);
5468 }
5469
5470 static void tg3_irq_quiesce(struct tg3 *tp)
5471 {
5472         int i;
5473
5474         BUG_ON(tp->irq_sync);
5475
5476         tp->irq_sync = 1;
5477         smp_mb();
5478
5479         for (i = 0; i < tp->irq_cnt; i++)
5480                 synchronize_irq(tp->napi[i].irq_vec);
5481 }
5482
5483 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5484  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5485  * with as well.  Most of the time, this is not necessary except when
5486  * shutting down the device.
5487  */
5488 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5489 {
5490         spin_lock_bh(&tp->lock);
5491         if (irq_sync)
5492                 tg3_irq_quiesce(tp);
5493 }
5494
5495 static inline void tg3_full_unlock(struct tg3 *tp)
5496 {
5497         spin_unlock_bh(&tp->lock);
5498 }
5499
5500 /* One-shot MSI handler - Chip automatically disables interrupt
5501  * after sending MSI so driver doesn't have to do it.
5502  */
5503 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5504 {
5505         struct tg3_napi *tnapi = dev_id;
5506         struct tg3 *tp = tnapi->tp;
5507
5508         prefetch(tnapi->hw_status);
5509         if (tnapi->rx_rcb)
5510                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5511
5512         if (likely(!tg3_irq_sync(tp)))
5513                 napi_schedule(&tnapi->napi);
5514
5515         return IRQ_HANDLED;
5516 }
5517
5518 /* MSI ISR - No need to check for interrupt sharing and no need to
5519  * flush status block and interrupt mailbox. PCI ordering rules
5520  * guarantee that MSI will arrive after the status block.
5521  */
5522 static irqreturn_t tg3_msi(int irq, void *dev_id)
5523 {
5524         struct tg3_napi *tnapi = dev_id;
5525         struct tg3 *tp = tnapi->tp;
5526
5527         prefetch(tnapi->hw_status);
5528         if (tnapi->rx_rcb)
5529                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5530         /*
5531          * Writing any value to intr-mbox-0 clears PCI INTA# and
5532          * chip-internal interrupt pending events.
5533          * Writing non-zero to intr-mbox-0 additional tells the
5534          * NIC to stop sending us irqs, engaging "in-intr-handler"
5535          * event coalescing.
5536          */
5537         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5538         if (likely(!tg3_irq_sync(tp)))
5539                 napi_schedule(&tnapi->napi);
5540
5541         return IRQ_RETVAL(1);
5542 }
5543
5544 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5545 {
5546         struct tg3_napi *tnapi = dev_id;
5547         struct tg3 *tp = tnapi->tp;
5548         struct tg3_hw_status *sblk = tnapi->hw_status;
5549         unsigned int handled = 1;
5550
5551         /* In INTx mode, it is possible for the interrupt to arrive at
5552          * the CPU before the status block posted prior to the interrupt.
5553          * Reading the PCI State register will confirm whether the
5554          * interrupt is ours and will flush the status block.
5555          */
5556         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5557                 if (tg3_flag(tp, CHIP_RESETTING) ||
5558                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5559                         handled = 0;
5560                         goto out;
5561                 }
5562         }
5563
5564         /*
5565          * Writing any value to intr-mbox-0 clears PCI INTA# and
5566          * chip-internal interrupt pending events.
5567          * Writing non-zero to intr-mbox-0 additional tells the
5568          * NIC to stop sending us irqs, engaging "in-intr-handler"
5569          * event coalescing.
5570          *
5571          * Flush the mailbox to de-assert the IRQ immediately to prevent
5572          * spurious interrupts.  The flush impacts performance but
5573          * excessive spurious interrupts can be worse in some cases.
5574          */
5575         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5576         if (tg3_irq_sync(tp))
5577                 goto out;
5578         sblk->status &= ~SD_STATUS_UPDATED;
5579         if (likely(tg3_has_work(tnapi))) {
5580                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5581                 napi_schedule(&tnapi->napi);
5582         } else {
5583                 /* No work, shared interrupt perhaps?  re-enable
5584                  * interrupts, and flush that PCI write
5585                  */
5586                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5587                                0x00000000);
5588         }
5589 out:
5590         return IRQ_RETVAL(handled);
5591 }
5592
5593 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5594 {
5595         struct tg3_napi *tnapi = dev_id;
5596         struct tg3 *tp = tnapi->tp;
5597         struct tg3_hw_status *sblk = tnapi->hw_status;
5598         unsigned int handled = 1;
5599
5600         /* In INTx mode, it is possible for the interrupt to arrive at
5601          * the CPU before the status block posted prior to the interrupt.
5602          * Reading the PCI State register will confirm whether the
5603          * interrupt is ours and will flush the status block.
5604          */
5605         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5606                 if (tg3_flag(tp, CHIP_RESETTING) ||
5607                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5608                         handled = 0;
5609                         goto out;
5610                 }
5611         }
5612
5613         /*
5614          * writing any value to intr-mbox-0 clears PCI INTA# and
5615          * chip-internal interrupt pending events.
5616          * writing non-zero to intr-mbox-0 additional tells the
5617          * NIC to stop sending us irqs, engaging "in-intr-handler"
5618          * event coalescing.
5619          *
5620          * Flush the mailbox to de-assert the IRQ immediately to prevent
5621          * spurious interrupts.  The flush impacts performance but
5622          * excessive spurious interrupts can be worse in some cases.
5623          */
5624         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5625
5626         /*
5627          * In a shared interrupt configuration, sometimes other devices'
5628          * interrupts will scream.  We record the current status tag here
5629          * so that the above check can report that the screaming interrupts
5630          * are unhandled.  Eventually they will be silenced.
5631          */
5632         tnapi->last_irq_tag = sblk->status_tag;
5633
5634         if (tg3_irq_sync(tp))
5635                 goto out;
5636
5637         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5638
5639         napi_schedule(&tnapi->napi);
5640
5641 out:
5642         return IRQ_RETVAL(handled);
5643 }
5644
5645 /* ISR for interrupt test */
5646 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5647 {
5648         struct tg3_napi *tnapi = dev_id;
5649         struct tg3 *tp = tnapi->tp;
5650         struct tg3_hw_status *sblk = tnapi->hw_status;
5651
5652         if ((sblk->status & SD_STATUS_UPDATED) ||
5653             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5654                 tg3_disable_ints(tp);
5655                 return IRQ_RETVAL(1);
5656         }
5657         return IRQ_RETVAL(0);
5658 }
5659
5660 static int tg3_init_hw(struct tg3 *, int);
5661 static int tg3_halt(struct tg3 *, int, int);
5662
5663 /* Restart hardware after configuration changes, self-test, etc.
5664  * Invoked with tp->lock held.
5665  */
5666 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5667         __releases(tp->lock)
5668         __acquires(tp->lock)
5669 {
5670         int err;
5671
5672         err = tg3_init_hw(tp, reset_phy);
5673         if (err) {
5674                 netdev_err(tp->dev,
5675                            "Failed to re-initialize device, aborting\n");
5676                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5677                 tg3_full_unlock(tp);
5678                 del_timer_sync(&tp->timer);
5679                 tp->irq_sync = 0;
5680                 tg3_napi_enable(tp);
5681                 dev_close(tp->dev);
5682                 tg3_full_lock(tp, 0);
5683         }
5684         return err;
5685 }
5686
5687 #ifdef CONFIG_NET_POLL_CONTROLLER
5688 static void tg3_poll_controller(struct net_device *dev)
5689 {
5690         int i;
5691         struct tg3 *tp = netdev_priv(dev);
5692
5693         for (i = 0; i < tp->irq_cnt; i++)
5694                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5695 }
5696 #endif
5697
5698 static void tg3_reset_task(struct work_struct *work)
5699 {
5700         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5701         int err;
5702         unsigned int restart_timer;
5703
5704         tg3_full_lock(tp, 0);
5705
5706         if (!netif_running(tp->dev)) {
5707                 tg3_full_unlock(tp);
5708                 return;
5709         }
5710
5711         tg3_full_unlock(tp);
5712
5713         tg3_phy_stop(tp);
5714
5715         tg3_netif_stop(tp);
5716
5717         tg3_full_lock(tp, 1);
5718
5719         restart_timer = tg3_flag(tp, RESTART_TIMER);
5720         tg3_flag_clear(tp, RESTART_TIMER);
5721
5722         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5723                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5724                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5725                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5726                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5727         }
5728
5729         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5730         err = tg3_init_hw(tp, 1);
5731         if (err)
5732                 goto out;
5733
5734         tg3_netif_start(tp);
5735
5736         if (restart_timer)
5737                 mod_timer(&tp->timer, jiffies + 1);
5738
5739 out:
5740         tg3_full_unlock(tp);
5741
5742         if (!err)
5743                 tg3_phy_start(tp);
5744 }
5745
5746 static void tg3_tx_timeout(struct net_device *dev)
5747 {
5748         struct tg3 *tp = netdev_priv(dev);
5749
5750         if (netif_msg_tx_err(tp)) {
5751                 netdev_err(dev, "transmit timed out, resetting\n");
5752                 tg3_dump_state(tp);
5753         }
5754
5755         schedule_work(&tp->reset_task);
5756 }
5757
5758 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5759 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5760 {
5761         u32 base = (u32) mapping & 0xffffffff;
5762
5763         return (base > 0xffffdcc0) && (base + len + 8 < base);
5764 }
5765
5766 /* Test for DMA addresses > 40-bit */
5767 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5768                                           int len)
5769 {
5770 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5771         if (tg3_flag(tp, 40BIT_DMA_BUG))
5772                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5773         return 0;
5774 #else
5775         return 0;
5776 #endif
5777 }
5778
5779 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5780                         dma_addr_t mapping, int len, u32 flags,
5781                         u32 mss_and_is_end)
5782 {
5783         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5784         int is_end = (mss_and_is_end & 0x1);
5785         u32 mss = (mss_and_is_end >> 1);
5786         u32 vlan_tag = 0;
5787
5788         if (is_end)
5789                 flags |= TXD_FLAG_END;
5790         if (flags & TXD_FLAG_VLAN) {
5791                 vlan_tag = flags >> 16;
5792                 flags &= 0xffff;
5793         }
5794         vlan_tag |= (mss << TXD_MSS_SHIFT);
5795
5796         txd->addr_hi = ((u64) mapping >> 32);
5797         txd->addr_lo = ((u64) mapping & 0xffffffff);
5798         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5799         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5800 }
5801
5802 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5803                                 struct sk_buff *skb, int last)
5804 {
5805         int i;
5806         u32 entry = tnapi->tx_prod;
5807         struct ring_info *txb = &tnapi->tx_buffers[entry];
5808
5809         pci_unmap_single(tnapi->tp->pdev,
5810                          dma_unmap_addr(txb, mapping),
5811                          skb_headlen(skb),
5812                          PCI_DMA_TODEVICE);
5813         for (i = 0; i < last; i++) {
5814                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5815
5816                 entry = NEXT_TX(entry);
5817                 txb = &tnapi->tx_buffers[entry];
5818
5819                 pci_unmap_page(tnapi->tp->pdev,
5820                                dma_unmap_addr(txb, mapping),
5821                                frag->size, PCI_DMA_TODEVICE);
5822         }
5823 }
5824
5825 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5826 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5827                                        struct sk_buff *skb,
5828                                        u32 base_flags, u32 mss)
5829 {
5830         struct tg3 *tp = tnapi->tp;
5831         struct sk_buff *new_skb;
5832         dma_addr_t new_addr = 0;
5833         u32 entry = tnapi->tx_prod;
5834         int ret = 0;
5835
5836         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5837                 new_skb = skb_copy(skb, GFP_ATOMIC);
5838         else {
5839                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5840
5841                 new_skb = skb_copy_expand(skb,
5842                                           skb_headroom(skb) + more_headroom,
5843                                           skb_tailroom(skb), GFP_ATOMIC);
5844         }
5845
5846         if (!new_skb) {
5847                 ret = -1;
5848         } else {
5849                 /* New SKB is guaranteed to be linear. */
5850                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5851                                           PCI_DMA_TODEVICE);
5852                 /* Make sure the mapping succeeded */
5853                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5854                         ret = -1;
5855                         dev_kfree_skb(new_skb);
5856
5857                 /* Make sure new skb does not cross any 4G boundaries.
5858                  * Drop the packet if it does.
5859                  */
5860                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5861               &nbs