tg3: Determine PCI function number in one place
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
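
/* Example usage (a sketch; ENABLE_APE and MDIOBUS_INITED are flags used
 * later in this file):
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              ...
 *      tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a plain
 * test_bit() on the adapter's flag bitmap.
 */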

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
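
/* NEXT_TX is an instance of the '& (foo - 1)' form described above:
 * TG3_TX_RING_SIZE is a power of two (512), so the mask wraps the ring
 * index without a hardware modulo, e.g. NEXT_TX(511) == 0.
 */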

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

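/* Indirect register access: the target register offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space, and the data is then
 * transferred through TG3PCI_REG_DATA.  indirect_lock serializes the two
 * config cycles so that concurrent accesses cannot clobber the window.
 */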
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

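/* NIC SRAM is reached through a similar window: TG3PCI_MEM_WIN_BASE_ADDR
 * selects the SRAM offset and TG3PCI_MEM_WIN_DATA moves the data, via
 * either PCI config space or the memory-mapped registers depending on
 * the SRAM_USE_CONFIG flag.  The window base is always reset to zero
 * afterwards.
 */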
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

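/* Acquire an APE lock: write a driver request to the per-lock request
 * register, then poll the grant register for up to 1 ms (100 x 10 us).
 * If the grant never shows APE_LOCK_GRANT_DRIVER, the request is revoked
 * and -EBUSY is returned.
 */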
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

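/* MII management frames are built in MAC_MI_COM: the PHY address and
 * register number are shifted into their MI_COM_* fields, a read or
 * write command is OR'd in along with MI_COM_START, and the MI_COM_BUSY
 * bit is then polled (up to PHY_BUSY_LOOPS iterations) for completion.
 * Autopolling is temporarily disabled around the access.
 */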
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

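/* Clause 45 registers are reached indirectly through the clause 22 MMD
 * access registers: the devad is written to MII_TG3_MMD_CTRL, the
 * register address to MII_TG3_MMD_ADDRESS, then the control register is
 * switched to no-post-increment data mode and the data is moved through
 * MII_TG3_MMD_ADDRESS.
 */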
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

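                /* The PHY address on 5717-plus devices depends on the
                 * PCI function number, which is determined once during
                 * probe and cached in tp->pci_fn; serdes ports sit 7
                 * addresses higher.
                 */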
                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

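/* Send a link update to the management firmware: a LINK_UPDATE command
 * and its length are written to the firmware command mailbox, followed
 * by four data words packing (BMCR << 16) | BMSR,
 * (ADVERTISE << 16) | LPA, (CTRL1000 << 16) | STAT1000 (copper only),
 * and the MII_PHYADDR register value << 16, then a firmware event is
 * generated.
 */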
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

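/* Map the driver's FLOW_CTRL_TX/RX bits onto the MII pause advertisement
 * bits.  Note the encoding is not one bit per direction: symmetric pause
 * is ADVERTISE_PAUSE_CAP alone, TX-only is ADVERTISE_PAUSE_ASYM alone,
 * and RX-only advertises both bits, per the 802.3 pause negotiation
 * rules.
 */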
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

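/* Resolve the negotiated 1000BASE-X pause configuration from the local
 * and link-partner advertisements, following the standard pause
 * resolution table: symmetric pause on both sides enables TX+RX, while
 * the asymmetric pairings enable one direction only.
 */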
1426 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1427 {
1428         u8 cap = 0;
1429
1430         if (lcladv & ADVERTISE_1000XPAUSE) {
1431                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1435                                 cap = FLOW_CTRL_RX;
1436                 } else {
1437                         if (rmtadv & LPA_1000XPAUSE)
1438                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1439                 }
1440         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1441                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1442                         cap = FLOW_CTRL_TX;
1443         }
1444
1445         return cap;
1446 }
1447
1448 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1449 {
1450         u8 autoneg;
1451         u8 flowctrl = 0;
1452         u32 old_rx_mode = tp->rx_mode;
1453         u32 old_tx_mode = tp->tx_mode;
1454
1455         if (tg3_flag(tp, USE_PHYLIB))
1456                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1457         else
1458                 autoneg = tp->link_config.autoneg;
1459
1460         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1461                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1462                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1463                 else
1464                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1465         } else
1466                 flowctrl = tp->link_config.flowctrl;
1467
1468         tp->link_config.active_flowctrl = flowctrl;
1469
1470         if (flowctrl & FLOW_CTRL_RX)
1471                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_rx_mode != tp->rx_mode)
1476                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1477
1478         if (flowctrl & FLOW_CTRL_TX)
1479                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1480         else
1481                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1482
1483         if (old_tx_mode != tp->tx_mode)
1484                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1485 }
1486
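/* phylib link-change callback.  Under tp->lock this recomputes the
 * MAC_MODE port/duplex bits and flow control from the PHY state,
 * adjusts MI status polling on 5785 and the TX IPG/slot time for
 * half-duplex gigabit, and emits a link report whenever speed, duplex,
 * link state or pause settings changed.
 */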
1487 static void tg3_adjust_link(struct net_device *dev)
1488 {
1489         u8 oldflowctrl, linkmesg = 0;
1490         u32 mac_mode, lcl_adv, rmt_adv;
1491         struct tg3 *tp = netdev_priv(dev);
1492         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1493
1494         spin_lock_bh(&tp->lock);
1495
1496         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1497                                     MAC_MODE_HALF_DUPLEX);
1498
1499         oldflowctrl = tp->link_config.active_flowctrl;
1500
1501         if (phydev->link) {
1502                 lcl_adv = 0;
1503                 rmt_adv = 0;
1504
1505                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507                 else if (phydev->speed == SPEED_1000 ||
1508                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1509                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1510                 else
1511                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1512
1513                 if (phydev->duplex == DUPLEX_HALF)
1514                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1515                 else {
1516                         lcl_adv = tg3_advert_flowctrl_1000T(
1517                                   tp->link_config.flowctrl);
1518
1519                         if (phydev->pause)
1520                                 rmt_adv = LPA_PAUSE_CAP;
1521                         if (phydev->asym_pause)
1522                                 rmt_adv |= LPA_PAUSE_ASYM;
1523                 }
1524
1525                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1526         } else
1527                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1528
1529         if (mac_mode != tp->mac_mode) {
1530                 tp->mac_mode = mac_mode;
1531                 tw32_f(MAC_MODE, tp->mac_mode);
1532                 udelay(40);
1533         }
1534
1535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1536                 if (phydev->speed == SPEED_10)
1537                         tw32(MAC_MI_STAT,
1538                              MAC_MI_STAT_10MBPS_MODE |
1539                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540                 else
1541                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542         }
1543
1544         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549         else
1550                 tw32(MAC_TX_LENGTHS,
1551                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1552                       (6 << TX_LENGTHS_IPG_SHIFT) |
1553                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1554
1555         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1556             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1557             phydev->speed != tp->link_config.active_speed ||
1558             phydev->duplex != tp->link_config.active_duplex ||
1559             oldflowctrl != tp->link_config.active_flowctrl)
1560                 linkmesg = 1;
1561
1562         tp->link_config.active_speed = phydev->speed;
1563         tp->link_config.active_duplex = phydev->duplex;
1564
1565         spin_unlock_bh(&tp->lock);
1566
1567         if (linkmesg)
1568                 tg3_link_report(tp);
1569 }
1570
1571 static int tg3_phy_init(struct tg3 *tp)
1572 {
1573         struct phy_device *phydev;
1574
1575         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1576                 return 0;
1577
1578         /* Bring the PHY back to a known state. */
1579         tg3_bmcr_reset(tp);
1580
1581         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1582
1583         /* Attach the MAC to the PHY. */
1584         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1585                              phydev->dev_flags, phydev->interface);
1586         if (IS_ERR(phydev)) {
1587                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1588                 return PTR_ERR(phydev);
1589         }
1590
1591         /* Mask with MAC supported features. */
1592         switch (phydev->interface) {
1593         case PHY_INTERFACE_MODE_GMII:
1594         case PHY_INTERFACE_MODE_RGMII:
1595                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1596                         phydev->supported &= (PHY_GBIT_FEATURES |
1597                                               SUPPORTED_Pause |
1598                                               SUPPORTED_Asym_Pause);
1599                         break;
1600                 }
1601                 /* fallthru */
1602         case PHY_INTERFACE_MODE_MII:
1603                 phydev->supported &= (PHY_BASIC_FEATURES |
1604                                       SUPPORTED_Pause |
1605                                       SUPPORTED_Asym_Pause);
1606                 break;
1607         default:
1608                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1609                 return -EINVAL;
1610         }
1611
1612         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1613
1614         phydev->advertising = phydev->supported;
1615
1616         return 0;
1617 }
1618
1619 static void tg3_phy_start(struct tg3 *tp)
1620 {
1621         struct phy_device *phydev;
1622
1623         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1624                 return;
1625
1626         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1627
1628         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1629                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1630                 phydev->speed = tp->link_config.orig_speed;
1631                 phydev->duplex = tp->link_config.orig_duplex;
1632                 phydev->autoneg = tp->link_config.orig_autoneg;
1633                 phydev->advertising = tp->link_config.orig_advertising;
1634         }
1635
1636         phy_start(phydev);
1637
1638         phy_start_aneg(phydev);
1639 }
1640
1641 static void tg3_phy_stop(struct tg3 *tp)
1642 {
1643         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1644                 return;
1645
1646         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1647 }
1648
1649 static void tg3_phy_fini(struct tg3 *tp)
1650 {
1651         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1652                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1653                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1654         }
1655 }
1656
1657 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1658 {
1659         u32 phytest;
1660
1661         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1662                 u32 phy;
1663
1664                 tg3_writephy(tp, MII_TG3_FET_TEST,
1665                              phytest | MII_TG3_FET_SHADOW_EN);
1666                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1667                         if (enable)
1668                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         else
1670                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1672                 }
1673                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1674         }
1675 }
1676
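/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers.  Pre-5705 chips and the serdes flavors of the
 * 5717+ parts are skipped, and FET-style PHYs use their own shadow
 * mechanism (tg3_phy_fet_toggle_apd() above).
 */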
1677 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1678 {
1679         u32 reg;
1680
1681         if (!tg3_flag(tp, 5705_PLUS) ||
1682             (tg3_flag(tp, 5717_PLUS) &&
1683              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1684                 return;
1685
1686         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1687                 tg3_phy_fet_toggle_apd(tp, enable);
1688                 return;
1689         }
1690
1691         reg = MII_TG3_MISC_SHDW_WREN |
1692               MII_TG3_MISC_SHDW_SCR5_SEL |
1693               MII_TG3_MISC_SHDW_SCR5_LPED |
1694               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1695               MII_TG3_MISC_SHDW_SCR5_SDTL |
1696               MII_TG3_MISC_SHDW_SCR5_C125OE;
1697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1698                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1699
1700         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1701
1702
1703         reg = MII_TG3_MISC_SHDW_WREN |
1704               MII_TG3_MISC_SHDW_APD_SEL |
1705               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1706         if (enable)
1707                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1708
1709         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1710 }
1711
1712 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1713 {
1714         u32 phy;
1715
1716         if (!tg3_flag(tp, 5705_PLUS) ||
1717             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1718                 return;
1719
1720         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1721                 u32 ephy;
1722
1723                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1724                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1725
1726                         tg3_writephy(tp, MII_TG3_FET_TEST,
1727                                      ephy | MII_TG3_FET_SHADOW_EN);
1728                         if (!tg3_readphy(tp, reg, &phy)) {
1729                                 if (enable)
1730                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 else
1732                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1733                                 tg3_writephy(tp, reg, phy);
1734                         }
1735                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1736                 }
1737         } else {
1738                 int ret;
1739
1740                 ret = tg3_phy_auxctl_read(tp,
1741                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1742                 if (!ret) {
1743                         if (enable)
1744                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         else
1746                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1747                         tg3_phy_auxctl_write(tp,
1748                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1749                 }
1750         }
1751 }
1752
1753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1754 {
1755         int ret;
1756         u32 val;
1757
1758         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1759                 return;
1760
1761         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1762         if (!ret)
1763                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1764                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1765 }
1766
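/* Distribute the one-time-programmable (OTP) calibration word across
 * the PHY DSP taps.  The DSP registers are only reachable while the
 * SMDSP auxctl bit is set, hence the enable/disable bracket around the
 * writes.
 */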
1767 static void tg3_phy_apply_otp(struct tg3 *tp)
1768 {
1769         u32 otp, phy;
1770
1771         if (!tp->phy_otp)
1772                 return;
1773
1774         otp = tp->phy_otp;
1775
1776         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1777                 return;
1778
1779         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1780         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1781         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1782
1783         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1784               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1785         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1786
1787         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1788         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1789         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1790
1791         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1793
1794         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1795         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1796
1797         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1798               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1799         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1800
1801         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1802 }
1803
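/* Re-evaluate Energy Efficient Ethernet after a link change.  LPI is
 * only worth arming on an autonegotiated full-duplex 100/1000 link
 * whose partner resolved EEE support; setlpicnt then acts as a short
 * countdown (presumably run from the driver timer) before LPI is
 * enabled, and LPI is switched off immediately in all other cases.
 */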
1804 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1805 {
1806         u32 val;
1807
1808         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1809                 return;
1810
1811         tp->setlpicnt = 0;
1812
1813         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1814             current_link_up == 1 &&
1815             tp->link_config.active_duplex == DUPLEX_FULL &&
1816             (tp->link_config.active_speed == SPEED_100 ||
1817              tp->link_config.active_speed == SPEED_1000)) {
1818                 u32 eeectl;
1819
1820                 if (tp->link_config.active_speed == SPEED_1000)
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1822                 else
1823                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1824
1825                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1826
1827                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1828                                   TG3_CL45_D7_EEERES_STAT, &val);
1829
1830                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1831                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1832                         tp->setlpicnt = 2;
1833         }
1834
1835         if (!tp->setlpicnt) {
1836                 val = tr32(TG3_CPMU_EEE_MODE);
1837                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1838         }
1839 }
1840
1841 static void tg3_phy_eee_enable(struct tg3 *tp)
1842 {
1843         u32 val;
1844
1845         if (tp->link_config.active_speed == SPEED_1000 &&
1846             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1847              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1848              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1849             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1850                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1851                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1852         }
1853
1854         val = tr32(TG3_CPMU_EEE_MODE);
1855         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1856 }
1857
1858 static int tg3_wait_macro_done(struct tg3 *tp)
1859 {
1860         int limit = 100;
1861
1862         while (limit--) {
1863                 u32 tmp32;
1864
1865                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1866                         if ((tmp32 & 0x1000) == 0)
1867                                 break;
1868                 }
1869         }
1870         if (limit < 0)
1871                 return -EBUSY;
1872
1873         return 0;
1874 }
1875
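/* Part of the 5703/4/5 PHY reset workaround: write a known test pattern
 * into each of the four DSP channels, read it back through the macro
 * interface, and request a retry (via *resetp) on any mismatch or macro
 * timeout.
 */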
1876 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1877 {
1878         static const u32 test_pat[4][6] = {
1879         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1880         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1881         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1882         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1883         };
1884         int chan;
1885
1886         for (chan = 0; chan < 4; chan++) {
1887                 int i;
1888
1889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1890                              (chan * 0x2000) | 0x0200);
1891                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1892
1893                 for (i = 0; i < 6; i++)
1894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1895                                      test_pat[chan][i]);
1896
1897                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1898                 if (tg3_wait_macro_done(tp)) {
1899                         *resetp = 1;
1900                         return -EBUSY;
1901                 }
1902
1903                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1904                              (chan * 0x2000) | 0x0200);
1905                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1906                 if (tg3_wait_macro_done(tp)) {
1907                         *resetp = 1;
1908                         return -EBUSY;
1909                 }
1910
1911                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1912                 if (tg3_wait_macro_done(tp)) {
1913                         *resetp = 1;
1914                         return -EBUSY;
1915                 }
1916
1917                 for (i = 0; i < 6; i += 2) {
1918                         u32 low, high;
1919
1920                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1921                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1922                             tg3_wait_macro_done(tp)) {
1923                                 *resetp = 1;
1924                                 return -EBUSY;
1925                         }
1926                         low &= 0x7fff;
1927                         high &= 0x000f;
1928                         if (low != test_pat[chan][i] ||
1929                             high != test_pat[chan][i+1]) {
1930                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1931                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1932                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1933
1934                                 return -EBUSY;
1935                         }
1936                 }
1937         }
1938
1939         return 0;
1940 }
1941
1942 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1943 {
1944         int chan;
1945
1946         for (chan = 0; chan < 4; chan++) {
1947                 int i;
1948
1949                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1950                              (chan * 0x2000) | 0x0200);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1952                 for (i = 0; i < 6; i++)
1953                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1954                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1955                 if (tg3_wait_macro_done(tp))
1956                         return -EBUSY;
1957         }
1958
1959         return 0;
1960 }
1961
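/* DSP-level reset workaround for 5703/5704/5705 PHYs: force a
 * master-mode gigabit link, verify the DSP channel test pattern
 * (retrying with a fresh BMCR reset up to ten times), then clear the
 * channels and restore the original CTRL1000 and EXT_CTRL settings.
 */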
1962 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1963 {
1964         u32 reg32, phy9_orig = 0; /* 0 if MII_CTRL1000 is never read back */
1965         int retries, do_phy_reset, err;
1966
1967         retries = 10;
1968         do_phy_reset = 1;
1969         do {
1970                 if (do_phy_reset) {
1971                         err = tg3_bmcr_reset(tp);
1972                         if (err)
1973                                 return err;
1974                         do_phy_reset = 0;
1975                 }
1976
1977                 /* Disable transmitter and interrupt.  */
1978                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1979                         continue;
1980
1981                 reg32 |= 0x3000;
1982                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1983
1984                 /* Set full-duplex, 1000 Mbps.  */
1985                 tg3_writephy(tp, MII_BMCR,
1986                              BMCR_FULLDPLX | BMCR_SPEED1000);
1987
1988                 /* Set to master mode.  */
1989                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1990                         continue;
1991
1992                 tg3_writephy(tp, MII_CTRL1000,
1993                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1994
1995                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1996                 if (err)
1997                         return err;
1998
1999                 /* Block the PHY control access.  */
2000                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2001
2002                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2003                 if (!err)
2004                         break;
2005         } while (--retries);
2006
2007         err = tg3_phy_reset_chanpat(tp);
2008         if (err)
2009                 return err;
2010
2011         tg3_phydsp_write(tp, 0x8005, 0x0000);
2012
2013         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2014         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2015
2016         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2017
2018         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2019
2020         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2021                 reg32 &= ~0x3000;
2022                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2023         } else if (!err)
2024                 err = -EBUSY;
2025
2026         return err;
2027 }
2028
2029 /* Unconditionally reset the Tigon3 PHY, then reapply the
2030  * chip-specific workarounds and settings that the reset clears.
2031  */
2032 static int tg3_phy_reset(struct tg3 *tp)
2033 {
2034         u32 val, cpmuctrl;
2035         int err;
2036
2037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038                 val = tr32(GRC_MISC_CFG);
2039                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040                 udelay(40);
2041         }
2042         err  = tg3_readphy(tp, MII_BMSR, &val);
2043         err |= tg3_readphy(tp, MII_BMSR, &val);
2044         if (err != 0)
2045                 return -EBUSY;
2046
2047         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048                 netif_carrier_off(tp->dev);
2049                 tg3_link_report(tp);
2050         }
2051
2052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055                 err = tg3_phy_reset_5703_4_5(tp);
2056                 if (err)
2057                         return err;
2058                 goto out;
2059         }
2060
2061         cpmuctrl = 0;
2062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066                         tw32(TG3_CPMU_CTRL,
2067                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2068         }
2069
2070         err = tg3_bmcr_reset(tp);
2071         if (err)
2072                 return err;
2073
2074         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2077
2078                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2079         }
2080
2081         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2086                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087                         udelay(40);
2088                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089                 }
2090         }
2091
2092         if (tg3_flag(tp, 5717_PLUS) &&
2093             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094                 return 0;
2095
2096         tg3_phy_apply_otp(tp);
2097
2098         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099                 tg3_phy_toggle_apd(tp, true);
2100         else
2101                 tg3_phy_toggle_apd(tp, false);
2102
2103 out:
2104         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109         }
2110
2111         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2114         }
2115
2116         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2119                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2120                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2122                 }
2123         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128                                 tg3_writephy(tp, MII_TG3_TEST1,
2129                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2130                         } else
2131                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2132
2133                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134                 }
2135         }
2136
2137         /* Set the Extended packet length bit (bit 14) on all chips
2138          * that support jumbo frames. */
2139         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140                 /* Cannot do read-modify-write on 5401 */
2141                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143                 /* Set bit 14 with read-modify-write to preserve other bits */
2144                 err = tg3_phy_auxctl_read(tp,
2145                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146                 if (!err)
2147                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2149         }
2150
2151         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2152          * jumbo frame transmission.
2153          */
2154         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2158         }
2159
2160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161                 /* adjust output voltage */
2162                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2163         }
2164
2165         tg3_phy_toggle_automdix(tp, 1);
2166         tg3_phy_set_wirespeed(tp);
2167         return 0;
2168 }
2169
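/* Power-source (Vmain/Vaux) switching helpers.  On NIC form factors the
 * power switch is driven through GRC_LOCAL_CTRL GPIOs; LOM designs are
 * left alone.  The 5700/5701 and the non-'E' 5761 wire the GPIOs
 * differently, hence the per-chip sequences below.
 */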
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2171 {
2172         if (!tg3_flag(tp, IS_NIC))
2173                 return 0;
2174
2175         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2177
2178         return 0;
2179 }
2180
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2182 {
2183         u32 grc_local_ctrl;
2184
2185         if (!tg3_flag(tp, IS_NIC) ||
2186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188                 return;
2189
2190         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2191
2192         tw32_wait_f(GRC_LOCAL_CTRL,
2193                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2195
2196         tw32_wait_f(GRC_LOCAL_CTRL,
2197                     grc_local_ctrl,
2198                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2199
2200         tw32_wait_f(GRC_LOCAL_CTRL,
2201                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2203 }
2204
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2206 {
2207         if (!tg3_flag(tp, IS_NIC))
2208                 return;
2209
2210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213                             (GRC_LCLCTRL_GPIO_OE0 |
2214                              GRC_LCLCTRL_GPIO_OE1 |
2215                              GRC_LCLCTRL_GPIO_OE2 |
2216                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2217                              GRC_LCLCTRL_GPIO_OUTPUT1),
2218                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2219         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223                                      GRC_LCLCTRL_GPIO_OE1 |
2224                                      GRC_LCLCTRL_GPIO_OE2 |
2225                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2226                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2227                                      tp->grc_local_ctrl;
2228                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2230
2231                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2234
2235                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2238         } else {
2239                 u32 no_gpio2;
2240                 u32 grc_local_ctrl = 0;
2241
2242                 /* Workaround to prevent drawing excess supply current. */
2243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246                                     grc_local_ctrl,
2247                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2248                 }
2249
2250                 /* On 5753 and variants, GPIO2 cannot be used. */
2251                 no_gpio2 = tp->nic_sram_data_cfg &
2252                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2253
2254                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255                                   GRC_LCLCTRL_GPIO_OE1 |
2256                                   GRC_LCLCTRL_GPIO_OE2 |
2257                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2258                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2259                 if (no_gpio2) {
2260                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2262                 }
2263                 tw32_wait_f(GRC_LOCAL_CTRL,
2264                             tp->grc_local_ctrl | grc_local_ctrl,
2265                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2266
2267                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2268
2269                 tw32_wait_f(GRC_LOCAL_CTRL,
2270                             tp->grc_local_ctrl | grc_local_ctrl,
2271                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2272
2273                 if (!no_gpio2) {
2274                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275                         tw32_wait_f(GRC_LOCAL_CTRL,
2276                                     tp->grc_local_ctrl | grc_local_ctrl,
2277                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2278                 }
2279         }
2280 }
2281
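/* Decide whether the board must stay on auxiliary power.  On dual-port
 * devices both functions share the power switch, so the peer's WoL/ASF
 * state is consulted before dropping to Vmain.
 */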
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2283 {
2284         bool need_vaux = false;
2285
2286         /* The GPIOs do something completely different on 57765. */
2287         if (!tg3_flag(tp, IS_NIC) ||
2288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290                 return;
2291
2292         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296             tp->pdev_peer != tp->pdev) {
2297                 struct net_device *dev_peer;
2298
2299                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2300
2301                 /* remove_one() may have been run on the peer. */
2302                 if (dev_peer) {
2303                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2304
2305                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2306                                 return;
2307
2308                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309                             tg3_flag(tp_peer, ENABLE_ASF))
2310                                 need_vaux = true;
2311                 }
2312         }
2313
2314         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315                 need_vaux = true;
2316
2317         if (need_vaux)
2318                 tg3_pwrsrc_switch_to_vaux(tp);
2319         else
2320                 tg3_pwrsrc_die_with_vmain(tp);
2321 }
2322
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2324 {
2325         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326                 return 1;
2327         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328                 if (speed != SPEED_10)
2329                         return 1;
2330         } else if (speed == SPEED_10)
2331                 return 1;
2332
2333         return 0;
2334 }
2335
2336 static int tg3_setup_phy(struct tg3 *, int);
2337
2338 #define RESET_KIND_SHUTDOWN     0
2339 #define RESET_KIND_INIT         1
2340 #define RESET_KIND_SUSPEND      2
2341
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
2344
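/* Put the PHY into its lowest safe power state.  Serdes, 5906 and FET
 * parts each need their own sequence, and a few chips must never see
 * BMCR_PDOWN at all because of hardware bugs (see the checks below).
 */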
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2346 {
2347         u32 val;
2348
2349         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2353
2354                         sg_dig_ctrl |=
2355                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2358                 }
2359                 return;
2360         }
2361
2362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363                 tg3_bmcr_reset(tp);
2364                 val = tr32(GRC_MISC_CFG);
2365                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366                 udelay(40);
2367                 return;
2368         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369                 u32 phytest;
2370                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371                         u32 phy;
2372
2373                         tg3_writephy(tp, MII_ADVERTISE, 0);
2374                         tg3_writephy(tp, MII_BMCR,
2375                                      BMCR_ANENABLE | BMCR_ANRESTART);
2376
2377                         tg3_writephy(tp, MII_TG3_FET_TEST,
2378                                      phytest | MII_TG3_FET_SHADOW_EN);
2379                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381                                 tg3_writephy(tp,
2382                                              MII_TG3_FET_SHDW_AUXMODE4,
2383                                              phy);
2384                         }
2385                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2386                 }
2387                 return;
2388         } else if (do_low_power) {
2389                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2391
2392                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2395                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2396         }
2397
2398         /* The PHY should not be powered down on some chips because
2399          * of bugs.
2400          */
2401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405                 return;
2406
2407         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2413         }
2414
2415         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2416 }
2417
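/* NVRAM software arbitration.  The SWARB register mediates NVRAM access
 * among multiple requesters (the driver uses request/grant pair 1); the
 * lock is reference counted, so only the first acquire and the last
 * release touch the hardware.
 */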
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, NVRAM)) {
2422                 int i;
2423
2424                 if (tp->nvram_lock_cnt == 0) {
2425                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426                         for (i = 0; i < 8000; i++) {
2427                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428                                         break;
2429                                 udelay(20);
2430                         }
2431                         if (i == 8000) {
2432                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433                                 return -ENODEV;
2434                         }
2435                 }
2436                 tp->nvram_lock_cnt++;
2437         }
2438         return 0;
2439 }
2440
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2443 {
2444         if (tg3_flag(tp, NVRAM)) {
2445                 if (tp->nvram_lock_cnt > 0)
2446                         tp->nvram_lock_cnt--;
2447                 if (tp->nvram_lock_cnt == 0)
2448                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2449         }
2450 }
2451
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2454 {
2455         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456                 u32 nvaccess = tr32(NVRAM_ACCESS);
2457
2458                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2459         }
2460 }
2461
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2464 {
2465         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466                 u32 nvaccess = tr32(NVRAM_ACCESS);
2467
2468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2469         }
2470 }
2471
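/* Fallback for parts without the NVRAM interface: drive the legacy
 * serial-EEPROM state machine through GRC_EEPROM_ADDR/DATA and poll
 * (up to roughly a second) for command completion.
 */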
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473                                         u32 offset, u32 *val)
2474 {
2475         u32 tmp;
2476         int i;
2477
2478         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479                 return -EINVAL;
2480
2481         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482                                         EEPROM_ADDR_DEVID_MASK |
2483                                         EEPROM_ADDR_READ);
2484         tw32(GRC_EEPROM_ADDR,
2485              tmp |
2486              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488               EEPROM_ADDR_ADDR_MASK) |
2489              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2490
2491         for (i = 0; i < 1000; i++) {
2492                 tmp = tr32(GRC_EEPROM_ADDR);
2493
2494                 if (tmp & EEPROM_ADDR_COMPLETE)
2495                         break;
2496                 msleep(1);
2497         }
2498         if (!(tmp & EEPROM_ADDR_COMPLETE))
2499                 return -EBUSY;
2500
2501         tmp = tr32(GRC_EEPROM_DATA);
2502
2503         /*
2504          * The data will always be in the opposite of the native
2505          * endian format.  Perform a blind byteswap to compensate.
2506          */
2507         *val = swab32(tmp);
2508
2509         return 0;
2510 }
2511
2512 #define NVRAM_CMD_TIMEOUT 10000
2513
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2515 {
2516         int i;
2517
2518         tw32(NVRAM_CMD, nvram_cmd);
2519         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520                 udelay(10);
2521                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522                         udelay(10);
2523                         break;
2524                 }
2525         }
2526
2527         if (i == NVRAM_CMD_TIMEOUT)
2528                 return -EBUSY;
2529
2530         return 0;
2531 }
2532
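/* Atmel AT45DB0X1B flashes do not use power-of-two page sizes, so a
 * linear NVRAM offset must be split into a page number (shifted into
 * the high bits) and a byte offset within the page.  A sketch, assuming
 * the 264-byte pages these parts use:
 *
 *   addr = 1000  ->  page = 1000 / 264 = 3, offset = 1000 % 264 = 208
 *                ->  phys = (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208
 *
 * tg3_nvram_logical_addr() below is the inverse mapping.
 */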
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2534 {
2535         if (tg3_flag(tp, NVRAM) &&
2536             tg3_flag(tp, NVRAM_BUFFERED) &&
2537             tg3_flag(tp, FLASH) &&
2538             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539             (tp->nvram_jedecnum == JEDEC_ATMEL))
2540
2541                 addr = ((addr / tp->nvram_pagesize) <<
2542                         ATMEL_AT45DB0X1B_PAGE_POS) +
2543                        (addr % tp->nvram_pagesize);
2544
2545         return addr;
2546 }
2547
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2549 {
2550         if (tg3_flag(tp, NVRAM) &&
2551             tg3_flag(tp, NVRAM_BUFFERED) &&
2552             tg3_flag(tp, FLASH) &&
2553             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554             (tp->nvram_jedecnum == JEDEC_ATMEL))
2555
2556                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557                         tp->nvram_pagesize) +
2558                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2559
2560         return addr;
2561 }
2562
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564  * the byteswapping settings for all other register accesses.
2565  * tg3 devices are BE devices, so on a BE machine, the data
2566  * returned will be exactly as it is seen in NVRAM.  On a LE
2567  * machine, the 32-bit value will be byteswapped.
2568  */
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2570 {
2571         int ret;
2572
2573         if (!tg3_flag(tp, NVRAM))
2574                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2575
2576         offset = tg3_nvram_phys_addr(tp, offset);
2577
2578         if (offset > NVRAM_ADDR_MSK)
2579                 return -EINVAL;
2580
2581         ret = tg3_nvram_lock(tp);
2582         if (ret)
2583                 return ret;
2584
2585         tg3_enable_nvram_access(tp);
2586
2587         tw32(NVRAM_ADDR, offset);
2588         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2590
2591         if (ret == 0)
2592                 *val = tr32(NVRAM_RDDATA);
2593
2594         tg3_disable_nvram_access(tp);
2595
2596         tg3_nvram_unlock(tp);
2597
2598         return ret;
2599 }
2600
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2603 {
2604         u32 v;
2605         int res = tg3_nvram_read(tp, offset, &v);
2606         if (!res)
2607                 *val = cpu_to_be32(v);
2608         return res;
2609 }
2610
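/* Program the station address into all four MAC address slots (callers
 * pass skip_mac_1 when slot 1 must be preserved) and seed the transmit
 * backoff algorithm from the address bytes.  5703/5704 carry twelve
 * additional extended address slots.
 */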
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2613 {
2614         u32 addr_high, addr_low;
2615         int i;
2616
2617         addr_high = ((tp->dev->dev_addr[0] << 8) |
2618                      tp->dev->dev_addr[1]);
2619         addr_low = ((tp->dev->dev_addr[2] << 24) |
2620                     (tp->dev->dev_addr[3] << 16) |
2621                     (tp->dev->dev_addr[4] <<  8) |
2622                     (tp->dev->dev_addr[5] <<  0));
2623         for (i = 0; i < 4; i++) {
2624                 if (i == 1 && skip_mac_1)
2625                         continue;
2626                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632                 for (i = 0; i < 12; i++) {
2633                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2635                 }
2636         }
2637
2638         addr_high = (tp->dev->dev_addr[0] +
2639                      tp->dev->dev_addr[1] +
2640                      tp->dev->dev_addr[2] +
2641                      tp->dev->dev_addr[3] +
2642                      tp->dev->dev_addr[4] +
2643                      tp->dev->dev_addr[5]) &
2644                 TX_BACKOFF_SEED_MASK;
2645         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2646 }
2647
2648 static void tg3_enable_register_access(struct tg3 *tp)
2649 {
2650         /*
2651          * Make sure register accesses (indirect or otherwise) will function
2652          * correctly.
2653          */
2654         pci_write_config_dword(tp->pdev,
2655                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2656 }
2657
2658 static int tg3_power_up(struct tg3 *tp)
2659 {
2660         int err;
2661
2662         tg3_enable_register_access(tp);
2663
2664         err = pci_set_power_state(tp->pdev, PCI_D0);
2665         if (!err) {
2666                 /* Switch out of Vaux if it is a NIC */
2667                 tg3_pwrsrc_switch_to_vmain(tp);
2668         } else {
2669                 netdev_err(tp->dev, "Transition to D0 failed\n");
2670         }
2671
2672         return err;
2673 }
2674
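/* Quiesce the chip ahead of a low-power transition: mask PCI
 * interrupts, save the current link settings, restrict advertising to
 * the speeds Wake-on-LAN can use, post the WoL state to the mailbox in
 * NIC SRAM, and gate whichever MAC/PHY clocks may be stopped.
 */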
2675 static int tg3_power_down_prepare(struct tg3 *tp)
2676 {
2677         u32 misc_host_ctrl;
2678         bool device_should_wake, do_low_power;
2679
2680         tg3_enable_register_access(tp);
2681
2682         /* Restore the CLKREQ setting. */
2683         if (tg3_flag(tp, CLKREQ_BUG)) {
2684                 u16 lnkctl;
2685
2686                 pci_read_config_word(tp->pdev,
2687                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2688                                      &lnkctl);
2689                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2690                 pci_write_config_word(tp->pdev,
2691                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2692                                       lnkctl);
2693         }
2694
2695         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2696         tw32(TG3PCI_MISC_HOST_CTRL,
2697              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2698
2699         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2700                              tg3_flag(tp, WOL_ENABLE);
2701
2702         if (tg3_flag(tp, USE_PHYLIB)) {
2703                 do_low_power = false;
2704                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2705                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2706                         struct phy_device *phydev;
2707                         u32 phyid, advertising;
2708
2709                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2710
2711                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2712
2713                         tp->link_config.orig_speed = phydev->speed;
2714                         tp->link_config.orig_duplex = phydev->duplex;
2715                         tp->link_config.orig_autoneg = phydev->autoneg;
2716                         tp->link_config.orig_advertising = phydev->advertising;
2717
2718                         advertising = ADVERTISED_TP |
2719                                       ADVERTISED_Pause |
2720                                       ADVERTISED_Autoneg |
2721                                       ADVERTISED_10baseT_Half;
2722
2723                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2724                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2725                                         advertising |=
2726                                                 ADVERTISED_100baseT_Half |
2727                                                 ADVERTISED_100baseT_Full |
2728                                                 ADVERTISED_10baseT_Full;
2729                                 else
2730                                         advertising |= ADVERTISED_10baseT_Full;
2731                         }
2732
2733                         phydev->advertising = advertising;
2734
2735                         phy_start_aneg(phydev);
2736
2737                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2738                         if (phyid != PHY_ID_BCMAC131) {
2739                                 phyid &= PHY_BCM_OUI_MASK;
2740                                 if (phyid == PHY_BCM_OUI_1 ||
2741                                     phyid == PHY_BCM_OUI_2 ||
2742                                     phyid == PHY_BCM_OUI_3)
2743                                         do_low_power = true;
2744                         }
2745                 }
2746         } else {
2747                 do_low_power = true;
2748
2749                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2750                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2751                         tp->link_config.orig_speed = tp->link_config.speed;
2752                         tp->link_config.orig_duplex = tp->link_config.duplex;
2753                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2754                 }
2755
2756                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2757                         tp->link_config.speed = SPEED_10;
2758                         tp->link_config.duplex = DUPLEX_HALF;
2759                         tp->link_config.autoneg = AUTONEG_ENABLE;
2760                         tg3_setup_phy(tp, 0);
2761                 }
2762         }
2763
2764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2765                 u32 val;
2766
2767                 val = tr32(GRC_VCPU_EXT_CTRL);
2768                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2769         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2770                 int i;
2771                 u32 val;
2772
2773                 for (i = 0; i < 200; i++) {
2774                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2775                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2776                                 break;
2777                         msleep(1);
2778                 }
2779         }
2780         if (tg3_flag(tp, WOL_CAP))
2781                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2782                                                      WOL_DRV_STATE_SHUTDOWN |
2783                                                      WOL_DRV_WOL |
2784                                                      WOL_SET_MAGIC_PKT);
2785
2786         if (device_should_wake) {
2787                 u32 mac_mode;
2788
2789                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2790                         if (do_low_power &&
2791                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2792                                 tg3_phy_auxctl_write(tp,
2793                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2794                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2795                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2796                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2797                                 udelay(40);
2798                         }
2799
2800                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2801                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2802                         else
2803                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2804
2805                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2806                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2807                             ASIC_REV_5700) {
2808                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2809                                              SPEED_100 : SPEED_10;
2810                                 if (tg3_5700_link_polarity(tp, speed))
2811                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2812                                 else
2813                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2814                         }
2815                 } else {
2816                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2817                 }
2818
2819                 if (!tg3_flag(tp, 5750_PLUS))
2820                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2821
2822                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2823                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2824                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2825                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2826
2827                 if (tg3_flag(tp, ENABLE_APE))
2828                         mac_mode |= MAC_MODE_APE_TX_EN |
2829                                     MAC_MODE_APE_RX_EN |
2830                                     MAC_MODE_TDE_ENABLE;
2831
2832                 tw32_f(MAC_MODE, mac_mode);
2833                 udelay(100);
2834
2835                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2836                 udelay(10);
2837         }
2838
2839         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2840             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2841              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2842                 u32 base_val;
2843
2844                 base_val = tp->pci_clock_ctrl;
2845                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2846                              CLOCK_CTRL_TXCLK_DISABLE);
2847
2848                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2849                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2850         } else if (tg3_flag(tp, 5780_CLASS) ||
2851                    tg3_flag(tp, CPMU_PRESENT) ||
2852                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2853                 /* do nothing */
2854         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2855                 u32 newbits1, newbits2;
2856
2857                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2858                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2859                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2860                                     CLOCK_CTRL_TXCLK_DISABLE |
2861                                     CLOCK_CTRL_ALTCLK);
2862                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2863                 } else if (tg3_flag(tp, 5705_PLUS)) {
2864                         newbits1 = CLOCK_CTRL_625_CORE;
2865                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2866                 } else {
2867                         newbits1 = CLOCK_CTRL_ALTCLK;
2868                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2869                 }
2870
2871                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2872                             40);
2873
2874                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2875                             40);
2876
2877                 if (!tg3_flag(tp, 5705_PLUS)) {
2878                         u32 newbits3;
2879
2880                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2881                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2882                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2883                                             CLOCK_CTRL_TXCLK_DISABLE |
2884                                             CLOCK_CTRL_44MHZ_CORE);
2885                         } else {
2886                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2887                         }
2888
2889                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2890                                     tp->pci_clock_ctrl | newbits3, 40);
2891                 }
2892         }
2893
2894         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2895                 tg3_power_down_phy(tp, do_low_power);
2896
2897         tg3_frob_aux_power(tp);
2898
2899         /* Workaround for unstable PLL clock */
2900         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2901             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2902                 u32 val = tr32(0x7d00);
2903
2904                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2905                 tw32(0x7d00, val);
2906                 if (!tg3_flag(tp, ENABLE_ASF)) {
2907                         int err;
2908
2909                         err = tg3_nvram_lock(tp);
2910                         tg3_halt_cpu(tp, RX_CPU_BASE);
2911                         if (!err)
2912                                 tg3_nvram_unlock(tp);
2913                 }
2914         }
2915
2916         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2917
2918         return 0;
2919 }
2920
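/* Final power-down path: run the shutdown preparation above, arm the
 * device for PCI wake-on-LAN if WOL is enabled, then put it in D3hot.
 */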
2921 static void tg3_power_down(struct tg3 *tp)
2922 {
2923         tg3_power_down_prepare(tp);
2924
2925         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2926         pci_set_power_state(tp->pdev, PCI_D3hot);
2927 }
2928
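/* Decode the speed/duplex field of the PHY auxiliary status register
 * into SPEED_* / DUPLEX_* values.  FET-style PHYs encode the result
 * differently and are handled in the default case.
 */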
2929 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2930 {
2931         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2932         case MII_TG3_AUX_STAT_10HALF:
2933                 *speed = SPEED_10;
2934                 *duplex = DUPLEX_HALF;
2935                 break;
2936
2937         case MII_TG3_AUX_STAT_10FULL:
2938                 *speed = SPEED_10;
2939                 *duplex = DUPLEX_FULL;
2940                 break;
2941
2942         case MII_TG3_AUX_STAT_100HALF:
2943                 *speed = SPEED_100;
2944                 *duplex = DUPLEX_HALF;
2945                 break;
2946
2947         case MII_TG3_AUX_STAT_100FULL:
2948                 *speed = SPEED_100;
2949                 *duplex = DUPLEX_FULL;
2950                 break;
2951
2952         case MII_TG3_AUX_STAT_1000HALF:
2953                 *speed = SPEED_1000;
2954                 *duplex = DUPLEX_HALF;
2955                 break;
2956
2957         case MII_TG3_AUX_STAT_1000FULL:
2958                 *speed = SPEED_1000;
2959                 *duplex = DUPLEX_FULL;
2960                 break;
2961
2962         default:
2963                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2964                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2965                                  SPEED_10;
2966                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2967                                   DUPLEX_HALF;
2968                         break;
2969                 }
2970                 *speed = SPEED_INVALID;
2971                 *duplex = DUPLEX_INVALID;
2972                 break;
2973         }
2974 }
2975
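/* Program the autoneg advertisement registers: 10/100 modes and flow
 * control go into MII_ADVERTISE, gigabit modes into MII_CTRL1000 and,
 * on EEE-capable PHYs, EEE abilities into the clause 45 EEE
 * advertisement register.
 */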
2976 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2977 {
2978         int err = 0;
2979         u32 val, new_adv;
2980
2981         new_adv = ADVERTISE_CSMA;
2982         if (advertise & ADVERTISED_10baseT_Half)
2983                 new_adv |= ADVERTISE_10HALF;
2984         if (advertise & ADVERTISED_10baseT_Full)
2985                 new_adv |= ADVERTISE_10FULL;
2986         if (advertise & ADVERTISED_100baseT_Half)
2987                 new_adv |= ADVERTISE_100HALF;
2988         if (advertise & ADVERTISED_100baseT_Full)
2989                 new_adv |= ADVERTISE_100FULL;
2990
2991         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2992
2993         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2994         if (err)
2995                 goto done;
2996
2997         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2998                 goto done;
2999
3000         new_adv = 0;
3001         if (advertise & ADVERTISED_1000baseT_Half)
3002                 new_adv |= ADVERTISE_1000HALF;
3003         if (advertise & ADVERTISED_1000baseT_Full)
3004                 new_adv |= ADVERTISE_1000FULL;
3005
3006         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3007             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3008                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3009
3010         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3011         if (err)
3012                 goto done;
3013
3014         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3015                 goto done;
3016
3017         tw32(TG3_CPMU_EEE_MODE,
3018              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3019
3020         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3021         if (!err) {
3022                 u32 err2;
3023
3024                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3025                 case ASIC_REV_5717:
3026                 case ASIC_REV_57765:
3027                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3028                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3029                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3030                         /* Fall through */
3031                 case ASIC_REV_5719:
3032                         val = MII_TG3_DSP_TAP26_ALNOKO |
3033                               MII_TG3_DSP_TAP26_RMRXSTO |
3034                               MII_TG3_DSP_TAP26_OPCSINPT;
3035                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3036                 }
3037
3038                 val = 0;
3039                 /* Advertise 100BASE-TX EEE ability */
3040                 if (advertise & ADVERTISED_100baseT_Full)
3041                         val |= MDIO_AN_EEE_ADV_100TX;
3042                 /* Advertise 1000BASE-T EEE ability */
3043                 if (advertise & ADVERTISED_1000baseT_Full)
3044                         val |= MDIO_AN_EEE_ADV_1000T;
3045                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3046
3047                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3048                 if (!err)
3049                         err = err2;
3050         }
3051
3052 done:
3053         return err;
3054 }
3055
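/* Begin link setup on a copper PHY: choose the advertisement mask
 * (restricted to 10/100 when in low-power mode), then either force the
 * requested speed/duplex through BMCR or restart autonegotiation.
 */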
3056 static void tg3_phy_copper_begin(struct tg3 *tp)
3057 {
3058         u32 new_adv;
3059         int i;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3062                 new_adv = ADVERTISED_10baseT_Half |
3063                           ADVERTISED_10baseT_Full;
3064                 if (tg3_flag(tp, WOL_SPEED_100MB))
3065                         new_adv |= ADVERTISED_100baseT_Half |
3066                                    ADVERTISED_100baseT_Full;
3067
3068                 tg3_phy_autoneg_cfg(tp, new_adv,
3069                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3070         } else if (tp->link_config.speed == SPEED_INVALID) {
3071                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3072                         tp->link_config.advertising &=
3073                                 ~(ADVERTISED_1000baseT_Half |
3074                                   ADVERTISED_1000baseT_Full);
3075
3076                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3077                                     tp->link_config.flowctrl);
3078         } else {
3079                 /* Asking for a specific link mode. */
3080                 if (tp->link_config.speed == SPEED_1000) {
3081                         if (tp->link_config.duplex == DUPLEX_FULL)
3082                                 new_adv = ADVERTISED_1000baseT_Full;
3083                         else
3084                                 new_adv = ADVERTISED_1000baseT_Half;
3085                 } else if (tp->link_config.speed == SPEED_100) {
3086                         if (tp->link_config.duplex == DUPLEX_FULL)
3087                                 new_adv = ADVERTISED_100baseT_Full;
3088                         else
3089                                 new_adv = ADVERTISED_100baseT_Half;
3090                 } else {
3091                         if (tp->link_config.duplex == DUPLEX_FULL)
3092                                 new_adv = ADVERTISED_10baseT_Full;
3093                         else
3094                                 new_adv = ADVERTISED_10baseT_Half;
3095                 }
3096
3097                 tg3_phy_autoneg_cfg(tp, new_adv,
3098                                     tp->link_config.flowctrl);
3099         }
3100
3101         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3102             tp->link_config.speed != SPEED_INVALID) {
3103                 u32 bmcr, orig_bmcr;
3104
3105                 tp->link_config.active_speed = tp->link_config.speed;
3106                 tp->link_config.active_duplex = tp->link_config.duplex;
3107
3108                 bmcr = 0;
3109                 switch (tp->link_config.speed) {
3110                 default:
3111                 case SPEED_10:
3112                         break;
3113
3114                 case SPEED_100:
3115                         bmcr |= BMCR_SPEED100;
3116                         break;
3117
3118                 case SPEED_1000:
3119                         bmcr |= BMCR_SPEED1000;
3120                         break;
3121                 }
3122
3123                 if (tp->link_config.duplex == DUPLEX_FULL)
3124                         bmcr |= BMCR_FULLDPLX;
3125
3126                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3127                     (bmcr != orig_bmcr)) {
3128                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3129                         for (i = 0; i < 1500; i++) {
3130                                 u32 tmp;
3131
3132                                 udelay(10);
3133                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3134                                     tg3_readphy(tp, MII_BMSR, &tmp))
3135                                         continue;
3136                                 if (!(tmp & BMSR_LSTATUS)) {
3137                                         udelay(40);
3138                                         break;
3139                                 }
3140                         }
3141                         tg3_writephy(tp, MII_BMCR, bmcr);
3142                         udelay(40);
3143                 }
3144         } else {
3145                 tg3_writephy(tp, MII_BMCR,
3146                              BMCR_ANENABLE | BMCR_ANRESTART);
3147         }
3148 }
3149
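/* Magic register sequence to initialize the DSP on BCM5401 PHYs. */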
3150 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3151 {
3152         int err;
3153
3154         /* Turn off tap power management and set the
3155          * extended packet length bit. */
3156         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3157
3158         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3159         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3160         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3161         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3162         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3163
3164         udelay(40);
3165
3166         return err;
3167 }
3168
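/* Return 1 if the PHY advertisement registers already contain every
 * mode requested in @mask, 0 otherwise or on a read failure.
 */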
3169 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3170 {
3171         u32 adv_reg, all_mask = 0;
3172
3173         if (mask & ADVERTISED_10baseT_Half)
3174                 all_mask |= ADVERTISE_10HALF;
3175         if (mask & ADVERTISED_10baseT_Full)
3176                 all_mask |= ADVERTISE_10FULL;
3177         if (mask & ADVERTISED_100baseT_Half)
3178                 all_mask |= ADVERTISE_100HALF;
3179         if (mask & ADVERTISED_100baseT_Full)
3180                 all_mask |= ADVERTISE_100FULL;
3181
3182         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3183                 return 0;
3184
3185         if ((adv_reg & all_mask) != all_mask)
3186                 return 0;
3187         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3188                 u32 tg3_ctrl;
3189
3190                 all_mask = 0;
3191                 if (mask & ADVERTISED_1000baseT_Half)
3192                         all_mask |= ADVERTISE_1000HALF;
3193                 if (mask & ADVERTISED_1000baseT_Full)
3194                         all_mask |= ADVERTISE_1000FULL;
3195
3196                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3197                         return 0;
3198
3199                 if ((tg3_ctrl & all_mask) != all_mask)
3200                         return 0;
3201         }
3202         return 1;
3203 }
3204
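/* Verify that the advertised flow control matches what was requested.
 * On a half-duplex link the advertisement register is rewritten instead,
 * so that a future renegotiation picks up the correct pause bits.
 */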
3205 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3206 {
3207         u32 curadv, reqadv;
3208
3209         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3210                 return 1;
3211
3212         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3213         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3214
3215         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3216                 if (curadv != reqadv)
3217                         return 0;
3218
3219                 if (tg3_flag(tp, PAUSE_AUTONEG))
3220                         tg3_readphy(tp, MII_LPA, rmtadv);
3221         } else {
3222                 /* Reprogram the advertisement register, even if it
3223                  * does not affect the current link.  If the link
3224                  * gets renegotiated in the future, we can save an
3225                  * additional renegotiation cycle by advertising
3226                  * it correctly in the first place.
3227                  */
3228                 if (curadv != reqadv) {
3229                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3230                                      ADVERTISE_PAUSE_ASYM);
3231                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3232                 }
3233         }
3234
3235         return 1;
3236 }
3237
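/* Establish (or re-verify) the link on a copper PHY: clear stale MAC
 * status bits, apply chip-specific PHY workarounds, poll BMSR for link,
 * then derive MAC mode, duplex and flow control from the result.
 */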
3238 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3239 {
3240         int current_link_up;
3241         u32 bmsr, val;
3242         u32 lcl_adv, rmt_adv;
3243         u16 current_speed;
3244         u8 current_duplex;
3245         int i, err;
3246
3247         tw32(MAC_EVENT, 0);
3248
3249         tw32_f(MAC_STATUS,
3250              (MAC_STATUS_SYNC_CHANGED |
3251               MAC_STATUS_CFG_CHANGED |
3252               MAC_STATUS_MI_COMPLETION |
3253               MAC_STATUS_LNKSTATE_CHANGED));
3254         udelay(40);
3255
3256         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3257                 tw32_f(MAC_MI_MODE,
3258                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3259                 udelay(80);
3260         }
3261
3262         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3263
3264         /* Some third-party PHYs need to be reset on link going
3265          * down.
3266          */
3267         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3268              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3269              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3270             netif_carrier_ok(tp->dev)) {
3271                 tg3_readphy(tp, MII_BMSR, &bmsr);
3272                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3273                     !(bmsr & BMSR_LSTATUS))
3274                         force_reset = 1;
3275         }
3276         if (force_reset)
3277                 tg3_phy_reset(tp);
3278
3279         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3280                 tg3_readphy(tp, MII_BMSR, &bmsr);
3281                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3282                     !tg3_flag(tp, INIT_COMPLETE))
3283                         bmsr = 0;
3284
3285                 if (!(bmsr & BMSR_LSTATUS)) {
3286                         err = tg3_init_5401phy_dsp(tp);
3287                         if (err)
3288                                 return err;
3289
3290                         tg3_readphy(tp, MII_BMSR, &bmsr);
3291                         for (i = 0; i < 1000; i++) {
3292                                 udelay(10);
3293                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3294                                     (bmsr & BMSR_LSTATUS)) {
3295                                         udelay(40);
3296                                         break;
3297                                 }
3298                         }
3299
3300                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3301                             TG3_PHY_REV_BCM5401_B0 &&
3302                             !(bmsr & BMSR_LSTATUS) &&
3303                             tp->link_config.active_speed == SPEED_1000) {
3304                                 err = tg3_phy_reset(tp);
3305                                 if (!err)
3306                                         err = tg3_init_5401phy_dsp(tp);
3307                                 if (err)
3308                                         return err;
3309                         }
3310                 }
3311         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3312                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3313                 /* 5701 {A0,B0} CRC bug workaround */
3314                 tg3_writephy(tp, 0x15, 0x0a75);
3315                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3316                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3317                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3318         }
3319
3320         /* Clear pending interrupts... */
3321         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3322         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3323
3324         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3325                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3326         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3327                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3328
3329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3331                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3332                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3333                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3334                 else
3335                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3336         }
3337
3338         current_link_up = 0;
3339         current_speed = SPEED_INVALID;
3340         current_duplex = DUPLEX_INVALID;
3341
3342         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3343                 err = tg3_phy_auxctl_read(tp,
3344                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3345                                           &val);
3346                 if (!err && !(val & (1 << 10))) {
3347                         tg3_phy_auxctl_write(tp,
3348                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3349                                              val | (1 << 10));
3350                         goto relink;
3351                 }
3352         }
3353
3354         bmsr = 0;
3355         for (i = 0; i < 100; i++) {
3356                 tg3_readphy(tp, MII_BMSR, &bmsr);
3357                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3358                     (bmsr & BMSR_LSTATUS))
3359                         break;
3360                 udelay(40);
3361         }
3362
3363         if (bmsr & BMSR_LSTATUS) {
3364                 u32 aux_stat, bmcr;
3365
3366                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3367                 for (i = 0; i < 2000; i++) {
3368                         udelay(10);
3369                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3370                             aux_stat)
3371                                 break;
3372                 }
3373
3374                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3375                                              &current_speed,
3376                                              &current_duplex);
3377
3378                 bmcr = 0;
3379                 for (i = 0; i < 200; i++) {
3380                         tg3_readphy(tp, MII_BMCR, &bmcr);
3381                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3382                                 continue;
3383                         if (bmcr && bmcr != 0x7fff)
3384                                 break;
3385                         udelay(10);
3386                 }
3387
3388                 lcl_adv = 0;
3389                 rmt_adv = 0;
3390
3391                 tp->link_config.active_speed = current_speed;
3392                 tp->link_config.active_duplex = current_duplex;
3393
3394                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3395                         if ((bmcr & BMCR_ANENABLE) &&
3396                             tg3_copper_is_advertising_all(tp,
3397                                                 tp->link_config.advertising)) {
3398                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3399                                                                   &rmt_adv))
3400                                         current_link_up = 1;
3401                         }
3402                 } else {
3403                         if (!(bmcr & BMCR_ANENABLE) &&
3404                             tp->link_config.speed == current_speed &&
3405                             tp->link_config.duplex == current_duplex &&
3406                             tp->link_config.flowctrl ==
3407                             tp->link_config.active_flowctrl) {
3408                                 current_link_up = 1;
3409                         }
3410                 }
3411
3412                 if (current_link_up == 1 &&
3413                     tp->link_config.active_duplex == DUPLEX_FULL)
3414                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3415         }
3416
3417 relink:
3418         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3419                 tg3_phy_copper_begin(tp);
3420
3421                 tg3_readphy(tp, MII_BMSR, &bmsr);
3422                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3423                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3424                         current_link_up = 1;
3425         }
3426
3427         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3428         if (current_link_up == 1) {
3429                 if (tp->link_config.active_speed == SPEED_100 ||
3430                     tp->link_config.active_speed == SPEED_10)
3431                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3432                 else
3433                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3434         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3435                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3436         else
3437                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3438
3439         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3440         if (tp->link_config.active_duplex == DUPLEX_HALF)
3441                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3442
3443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3444                 if (current_link_up == 1 &&
3445                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3446                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3447                 else
3448                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3449         }
3450
3451         /* ??? Without this setting Netgear GA302T PHY does not
3452          * ??? send/receive packets...
3453          */
3454         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3455             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3456                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3457                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3458                 udelay(80);
3459         }
3460
3461         tw32_f(MAC_MODE, tp->mac_mode);
3462         udelay(40);
3463
3464         tg3_phy_eee_adjust(tp, current_link_up);
3465
3466         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3467                 /* Polled via timer. */
3468                 tw32_f(MAC_EVENT, 0);
3469         } else {
3470                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3471         }
3472         udelay(40);
3473
3474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3475             current_link_up == 1 &&
3476             tp->link_config.active_speed == SPEED_1000 &&
3477             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3478                 udelay(120);
3479                 tw32_f(MAC_STATUS,
3480                      (MAC_STATUS_SYNC_CHANGED |
3481                       MAC_STATUS_CFG_CHANGED));
3482                 udelay(40);
3483                 tg3_write_mem(tp,
3484                               NIC_SRAM_FIRMWARE_MBOX,
3485                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3486         }
3487
3488         /* Prevent send BD corruption by disabling PCIe CLKREQ at 10/100 speeds. */
3489         if (tg3_flag(tp, CLKREQ_BUG)) {
3490                 u16 oldlnkctl, newlnkctl;
3491
3492                 pci_read_config_word(tp->pdev,
3493                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3494                                      &oldlnkctl);
3495                 if (tp->link_config.active_speed == SPEED_100 ||
3496                     tp->link_config.active_speed == SPEED_10)
3497                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3498                 else
3499                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3500                 if (newlnkctl != oldlnkctl)
3501                         pci_write_config_word(tp->pdev,
3502                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3503                                               newlnkctl);
3504         }
3505
3506         if (current_link_up != netif_carrier_ok(tp->dev)) {
3507                 if (current_link_up)
3508                         netif_carrier_on(tp->dev);
3509                 else
3510                         netif_carrier_off(tp->dev);
3511                 tg3_link_report(tp);
3512         }
3513
3514         return 0;
3515 }
3516
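/* State for the software 1000BASE-X autonegotiation engine, used when
 * the negotiation cannot be done in hardware.  The MR_* flag names
 * follow the management variables of IEEE 802.3 clause 37.
 */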
3517 struct tg3_fiber_aneginfo {
3518         int state;
3519 #define ANEG_STATE_UNKNOWN              0
3520 #define ANEG_STATE_AN_ENABLE            1
3521 #define ANEG_STATE_RESTART_INIT         2
3522 #define ANEG_STATE_RESTART              3
3523 #define ANEG_STATE_DISABLE_LINK_OK      4
3524 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3525 #define ANEG_STATE_ABILITY_DETECT       6
3526 #define ANEG_STATE_ACK_DETECT_INIT      7
3527 #define ANEG_STATE_ACK_DETECT           8
3528 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3529 #define ANEG_STATE_COMPLETE_ACK         10
3530 #define ANEG_STATE_IDLE_DETECT_INIT     11
3531 #define ANEG_STATE_IDLE_DETECT          12
3532 #define ANEG_STATE_LINK_OK              13
3533 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3534 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3535
3536         u32 flags;
3537 #define MR_AN_ENABLE            0x00000001
3538 #define MR_RESTART_AN           0x00000002
3539 #define MR_AN_COMPLETE          0x00000004
3540 #define MR_PAGE_RX              0x00000008
3541 #define MR_NP_LOADED            0x00000010
3542 #define MR_TOGGLE_TX            0x00000020
3543 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3544 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3545 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3546 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3547 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3548 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3549 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3550 #define MR_TOGGLE_RX            0x00002000
3551 #define MR_NP_RX                0x00004000
3552
3553 #define MR_LINK_OK              0x80000000
3554
3555         unsigned long link_time, cur_time;
3556
3557         u32 ability_match_cfg;
3558         int ability_match_count;
3559
3560         char ability_match, idle_match, ack_match;
3561
3562         u32 txconfig, rxconfig;
3563 #define ANEG_CFG_NP             0x00000080
3564 #define ANEG_CFG_ACK            0x00000040
3565 #define ANEG_CFG_RF2            0x00000020
3566 #define ANEG_CFG_RF1            0x00000010
3567 #define ANEG_CFG_PS2            0x00000001
3568 #define ANEG_CFG_PS1            0x00008000
3569 #define ANEG_CFG_HD             0x00004000
3570 #define ANEG_CFG_FD             0x00002000
3571 #define ANEG_CFG_INVAL          0x00001f06
3572
3573 };
3574 #define ANEG_OK         0
3575 #define ANEG_DONE       1
3576 #define ANEG_TIMER_ENAB 2
3577 #define ANEG_FAILED     -1
3578
3579 #define ANEG_STATE_SETTLE_TIME  10000
3580
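/* Advance the software fiber autonegotiation state machine by one tick:
 * sample the received config word, update ap->state, and return one of
 * ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */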
3581 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3582                                    struct tg3_fiber_aneginfo *ap)
3583 {
3584         u16 flowctrl;
3585         unsigned long delta;
3586         u32 rx_cfg_reg;
3587         int ret;
3588
3589         if (ap->state == ANEG_STATE_UNKNOWN) {
3590                 ap->rxconfig = 0;
3591                 ap->link_time = 0;
3592                 ap->cur_time = 0;
3593                 ap->ability_match_cfg = 0;
3594                 ap->ability_match_count = 0;
3595                 ap->ability_match = 0;
3596                 ap->idle_match = 0;
3597                 ap->ack_match = 0;
3598         }
3599         ap->cur_time++;
3600
3601         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3602                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3603
3604                 if (rx_cfg_reg != ap->ability_match_cfg) {
3605                         ap->ability_match_cfg = rx_cfg_reg;
3606                         ap->ability_match = 0;
3607                         ap->ability_match_count = 0;
3608                 } else {
3609                         if (++ap->ability_match_count > 1) {
3610                                 ap->ability_match = 1;
3611                                 ap->ability_match_cfg = rx_cfg_reg;
3612                         }
3613                 }
3614                 if (rx_cfg_reg & ANEG_CFG_ACK)
3615                         ap->ack_match = 1;
3616                 else
3617                         ap->ack_match = 0;
3618
3619                 ap->idle_match = 0;
3620         } else {
3621                 ap->idle_match = 1;
3622                 ap->ability_match_cfg = 0;
3623                 ap->ability_match_count = 0;
3624                 ap->ability_match = 0;
3625                 ap->ack_match = 0;
3626
3627                 rx_cfg_reg = 0;
3628         }
3629
3630         ap->rxconfig = rx_cfg_reg;
3631         ret = ANEG_OK;
3632
3633         switch (ap->state) {
3634         case ANEG_STATE_UNKNOWN:
3635                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3636                         ap->state = ANEG_STATE_AN_ENABLE;
3637
3638                 /* fallthru */
3639         case ANEG_STATE_AN_ENABLE:
3640                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3641                 if (ap->flags & MR_AN_ENABLE) {
3642                         ap->link_time = 0;
3643                         ap->cur_time = 0;
3644                         ap->ability_match_cfg = 0;
3645                         ap->ability_match_count = 0;
3646                         ap->ability_match = 0;
3647                         ap->idle_match = 0;
3648                         ap->ack_match = 0;
3649
3650                         ap->state = ANEG_STATE_RESTART_INIT;
3651                 } else {
3652                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3653                 }
3654                 break;
3655
3656         case ANEG_STATE_RESTART_INIT:
3657                 ap->link_time = ap->cur_time;
3658                 ap->flags &= ~(MR_NP_LOADED);
3659                 ap->txconfig = 0;
3660                 tw32(MAC_TX_AUTO_NEG, 0);
3661                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3662                 tw32_f(MAC_MODE, tp->mac_mode);
3663                 udelay(40);
3664
3665                 ret = ANEG_TIMER_ENAB;
3666                 ap->state = ANEG_STATE_RESTART;
3667
3668                 /* fallthru */
3669         case ANEG_STATE_RESTART:
3670                 delta = ap->cur_time - ap->link_time;
3671                 if (delta > ANEG_STATE_SETTLE_TIME)
3672                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3673                 else
3674                         ret = ANEG_TIMER_ENAB;
3675                 break;
3676
3677         case ANEG_STATE_DISABLE_LINK_OK:
3678                 ret = ANEG_DONE;
3679                 break;
3680
3681         case ANEG_STATE_ABILITY_DETECT_INIT:
3682                 ap->flags &= ~(MR_TOGGLE_TX);
3683                 ap->txconfig = ANEG_CFG_FD;
3684                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3685                 if (flowctrl & ADVERTISE_1000XPAUSE)
3686                         ap->txconfig |= ANEG_CFG_PS1;
3687                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3688                         ap->txconfig |= ANEG_CFG_PS2;
3689                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3690                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3691                 tw32_f(MAC_MODE, tp->mac_mode);
3692                 udelay(40);
3693
3694                 ap->state = ANEG_STATE_ABILITY_DETECT;
3695                 break;
3696
3697         case ANEG_STATE_ABILITY_DETECT:
3698                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3699                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3700                 break;
3701
3702         case ANEG_STATE_ACK_DETECT_INIT:
3703                 ap->txconfig |= ANEG_CFG_ACK;
3704                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3705                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3706                 tw32_f(MAC_MODE, tp->mac_mode);
3707                 udelay(40);
3708
3709                 ap->state = ANEG_STATE_ACK_DETECT;
3710
3711                 /* fallthru */
3712         case ANEG_STATE_ACK_DETECT:
3713                 if (ap->ack_match != 0) {
3714                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3715                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3716                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3717                         } else {
3718                                 ap->state = ANEG_STATE_AN_ENABLE;
3719                         }
3720                 } else if (ap->ability_match != 0 &&
3721                            ap->rxconfig == 0) {
3722                         ap->state = ANEG_STATE_AN_ENABLE;
3723                 }
3724                 break;
3725
3726         case ANEG_STATE_COMPLETE_ACK_INIT:
3727                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3728                         ret = ANEG_FAILED;
3729                         break;
3730                 }
3731                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3732                                MR_LP_ADV_HALF_DUPLEX |
3733                                MR_LP_ADV_SYM_PAUSE |
3734                                MR_LP_ADV_ASYM_PAUSE |
3735                                MR_LP_ADV_REMOTE_FAULT1 |
3736                                MR_LP_ADV_REMOTE_FAULT2 |
3737                                MR_LP_ADV_NEXT_PAGE |
3738                                MR_TOGGLE_RX |
3739                                MR_NP_RX);
3740                 if (ap->rxconfig & ANEG_CFG_FD)
3741                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3742                 if (ap->rxconfig & ANEG_CFG_HD)
3743                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3744                 if (ap->rxconfig & ANEG_CFG_PS1)
3745                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3746                 if (ap->rxconfig & ANEG_CFG_PS2)
3747                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3748                 if (ap->rxconfig & ANEG_CFG_RF1)
3749                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3750                 if (ap->rxconfig & ANEG_CFG_RF2)
3751                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3752                 if (ap->rxconfig & ANEG_CFG_NP)
3753                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3754
3755                 ap->link_time = ap->cur_time;
3756
3757                 ap->flags ^= (MR_TOGGLE_TX);
3758                 if (ap->rxconfig & 0x0008)
3759                         ap->flags |= MR_TOGGLE_RX;
3760                 if (ap->rxconfig & ANEG_CFG_NP)
3761                         ap->flags |= MR_NP_RX;
3762                 ap->flags |= MR_PAGE_RX;
3763
3764                 ap->state = ANEG_STATE_COMPLETE_ACK;
3765                 ret = ANEG_TIMER_ENAB;
3766                 break;
3767
3768         case ANEG_STATE_COMPLETE_ACK:
3769                 if (ap->ability_match != 0 &&
3770                     ap->rxconfig == 0) {
3771                         ap->state = ANEG_STATE_AN_ENABLE;
3772                         break;
3773                 }
3774                 delta = ap->cur_time - ap->link_time;
3775                 if (delta > ANEG_STATE_SETTLE_TIME) {
3776                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3777                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3778                         } else {
3779                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3780                                     !(ap->flags & MR_NP_RX)) {
3781                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3782                                 } else {
3783                                         ret = ANEG_FAILED;
3784                                 }
3785                         }
3786                 }
3787                 break;
3788
3789         case ANEG_STATE_IDLE_DETECT_INIT:
3790                 ap->link_time = ap->cur_time;
3791                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3792                 tw32_f(MAC_MODE, tp->mac_mode);
3793                 udelay(40);
3794
3795                 ap->state = ANEG_STATE_IDLE_DETECT;
3796                 ret = ANEG_TIMER_ENAB;
3797                 break;
3798
3799         case ANEG_STATE_IDLE_DETECT:
3800                 if (ap->ability_match != 0 &&
3801                     ap->rxconfig == 0) {
3802                         ap->state = ANEG_STATE_AN_ENABLE;
3803                         break;
3804                 }
3805                 delta = ap->cur_time - ap->link_time;
3806                 if (delta > ANEG_STATE_SETTLE_TIME) {
3807                         /* XXX another gem from the Broadcom driver :( */
3808                         ap->state = ANEG_STATE_LINK_OK;
3809                 }
3810                 break;
3811
3812         case ANEG_STATE_LINK_OK:
3813                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3814                 ret = ANEG_DONE;
3815                 break;
3816
3817         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3818                 /* ??? unimplemented */
3819                 break;
3820
3821         case ANEG_STATE_NEXT_PAGE_WAIT:
3822                 /* ??? unimplemented */
3823                 break;
3824
3825         default:
3826                 ret = ANEG_FAILED;
3827                 break;
3828         }
3829
3830         return ret;
3831 }
3832
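/* Drive the software autonegotiation state machine for up to ~195 ms
 * and report the final tx/rx config words to the caller.  Returns 1 if
 * negotiation completed successfully.
 */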
3833 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3834 {
3835         int res = 0;
3836         struct tg3_fiber_aneginfo aninfo;
3837         int status = ANEG_FAILED;
3838         unsigned int tick;
3839         u32 tmp;
3840
3841         tw32_f(MAC_TX_AUTO_NEG, 0);
3842
3843         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3844         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3845         udelay(40);
3846
3847         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3848         udelay(40);
3849
3850         memset(&aninfo, 0, sizeof(aninfo));
3851         aninfo.flags |= MR_AN_ENABLE;
3852         aninfo.state = ANEG_STATE_UNKNOWN;
3853         aninfo.cur_time = 0;
3854         tick = 0;
3855         while (++tick < 195000) {
3856                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3857                 if (status == ANEG_DONE || status == ANEG_FAILED)
3858                         break;
3859
3860                 udelay(1);
3861         }
3862
3863         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3864         tw32_f(MAC_MODE, tp->mac_mode);
3865         udelay(40);
3866
3867         *txflags = aninfo.txconfig;
3868         *rxflags = aninfo.flags;
3869
3870         if (status == ANEG_DONE &&
3871             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3872                              MR_LP_ADV_FULL_DUPLEX)))
3873                 res = 1;
3874
3875         return res;
3876 }
3877
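/* Hardware init sequence for the BCM8002 fiber PHY, programmed entirely
 * through vendor-specific registers.
 */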
3878 static void tg3_init_bcm8002(struct tg3 *tp)
3879 {
3880         u32 mac_status = tr32(MAC_STATUS);
3881         int i;
3882
3883         /* Reset when initializing for the first time or when we have a link. */
3884         if (tg3_flag(tp, INIT_COMPLETE) &&
3885             !(mac_status & MAC_STATUS_PCS_SYNCED))
3886                 return;
3887
3888         /* Set PLL lock range. */
3889         tg3_writephy(tp, 0x16, 0x8007);
3890
3891         /* SW reset */
3892         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3893
3894         /* Wait for reset to complete. */
3895         /* XXX schedule_timeout() ... */
3896         for (i = 0; i < 500; i++)
3897                 udelay(10);
3898
3899         /* Config mode; select PMA/Ch 1 regs. */
3900         tg3_writephy(tp, 0x10, 0x8411);
3901
3902         /* Enable auto-lock and comdet, select txclk for tx. */
3903         tg3_writephy(tp, 0x11, 0x0a10);
3904
3905         tg3_writephy(tp, 0x18, 0x00a0);
3906         tg3_writephy(tp, 0x16, 0x41ff);
3907
3908         /* Assert and deassert POR. */
3909         tg3_writephy(tp, 0x13, 0x0400);
3910         udelay(40);
3911         tg3_writephy(tp, 0x13, 0x0000);
3912
3913         tg3_writephy(tp, 0x11, 0x0a50);
3914         udelay(40);
3915         tg3_writephy(tp, 0x11, 0x0a10);
3916
3917         /* Wait for signal to stabilize */
3918         /* XXX schedule_timeout() ... */
3919         for (i = 0; i < 15000; i++)
3920                 udelay(10);
3921
3922         /* Deselect the channel register so we can read the PHYID
3923          * later.
3924          */
3925         tg3_writephy(tp, 0x10, 0x8011);
3926 }
3927
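/* Fiber link setup using the hardware SG-DIG autonegotiation block.
 * Returns nonzero when the link is up.
 */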
3928 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3929 {
3930         u16 flowctrl;
3931         u32 sg_dig_ctrl, sg_dig_status;
3932         u32 serdes_cfg, expected_sg_dig_ctrl;
3933         int workaround, port_a;
3934         int current_link_up;
3935
3936         serdes_cfg = 0;
3937         expected_sg_dig_ctrl = 0;
3938         workaround = 0;
3939         port_a = 1;
3940         current_link_up = 0;
3941
3942         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3943             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3944                 workaround = 1;
3945                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3946                         port_a = 0;
3947
3948                 /* Preserve bits 0-11, 13 and 14 for signal pre-emphasis
3949                  * and bits 20-23 for the voltage regulator. */
3950                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3951         }
3952
3953         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3954
3955         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3956                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3957                         if (workaround) {
3958                                 u32 val = serdes_cfg;
3959
3960                                 if (port_a)
3961                                         val |= 0xc010000;
3962                                 else
3963                                         val |= 0x4010000;
3964                                 tw32_f(MAC_SERDES_CFG, val);
3965                         }
3966
3967                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3968                 }
3969                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3970                         tg3_setup_flow_control(tp, 0, 0);
3971                         current_link_up = 1;
3972                 }
3973                 goto out;
3974         }
3975
3976         /* Want auto-negotiation.  */
3977         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3978
3979         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3980         if (flowctrl & ADVERTISE_1000XPAUSE)
3981                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3982         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3983                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3984
3985         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3986                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3987                     tp->serdes_counter &&
3988                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3989                                     MAC_STATUS_RCVD_CFG)) ==
3990                      MAC_STATUS_PCS_SYNCED)) {
3991                         tp->serdes_counter--;
3992                         current_link_up = 1;
3993                         goto out;
3994                 }
3995 restart_autoneg:
3996                 if (workaround)
3997                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3998                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3999                 udelay(5);
4000                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4001
4002                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4003                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4004         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4005                                  MAC_STATUS_SIGNAL_DET)) {
4006                 sg_dig_status = tr32(SG_DIG_STATUS);
4007                 mac_status = tr32(MAC_STATUS);
4008
4009                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4010                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4011                         u32 local_adv = 0, remote_adv = 0;
4012
4013                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4014                                 local_adv |= ADVERTISE_1000XPAUSE;
4015                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4016                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4017
4018                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4019                                 remote_adv |= LPA_1000XPAUSE;
4020                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4021                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4022
4023                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4024                         current_link_up = 1;
4025                         tp->serdes_counter = 0;
4026                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4027                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4028                         if (tp->serdes_counter)
4029                                 tp->serdes_counter--;
4030                         else {
4031                                 if (workaround) {
4032                                         u32 val = serdes_cfg;
4033
4034                                         if (port_a)
4035                                                 val |= 0xc010000;
4036                                         else
4037                                                 val |= 0x4010000;
4038
4039                                         tw32_f(MAC_SERDES_CFG, val);
4040                                 }
4041
4042                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4043                                 udelay(40);
4044
4045                                 /* Link parallel detection: the link is up
4046                                  * only if we have PCS_SYNC and are not
4047                                  * receiving config code words. */
4048                                 mac_status = tr32(MAC_STATUS);
4049                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4050                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4051                                         tg3_setup_flow_control(tp, 0, 0);
4052                                         current_link_up = 1;
4053                                         tp->phy_flags |=
4054                                                 TG3_PHYFLG_PARALLEL_DETECT;
4055                                         tp->serdes_counter =
4056                                                 SERDES_PARALLEL_DET_TIMEOUT;
4057                                 } else
4058                                         goto restart_autoneg;
4059                         }
4060                 }
4061         } else {
4062                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4063                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4064         }
4065
4066 out:
4067         return current_link_up;
4068 }
4069
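/* Fiber link setup without hardware assistance: run the software
 * autonegotiation state machine, or simply force a 1000FD link when
 * autoneg is disabled.  Returns nonzero when the link is up.
 */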
4070 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4071 {
4072         int current_link_up = 0;
4073
4074         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4075                 goto out;
4076
4077         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4078                 u32 txflags, rxflags;
4079                 int i;
4080
4081                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4082                         u32 local_adv = 0, remote_adv = 0;
4083
4084                         if (txflags & ANEG_CFG_PS1)
4085                                 local_adv |= ADVERTISE_1000XPAUSE;
4086                         if (txflags & ANEG_CFG_PS2)
4087                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4088
4089                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4090                                 remote_adv |= LPA_1000XPAUSE;
4091                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4092                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4093
4094                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4095
4096                         current_link_up = 1;
4097                 }
4098                 for (i = 0; i < 30; i++) {
4099                         udelay(20);
4100                         tw32_f(MAC_STATUS,
4101                                (MAC_STATUS_SYNC_CHANGED |
4102                                 MAC_STATUS_CFG_CHANGED));
4103                         udelay(40);
4104                         if ((tr32(MAC_STATUS) &
4105                              (MAC_STATUS_SYNC_CHANGED |
4106                               MAC_STATUS_CFG_CHANGED)) == 0)
4107                                 break;
4108                 }
4109
4110                 mac_status = tr32(MAC_STATUS);
4111                 if (current_link_up == 0 &&
4112                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4113                     !(mac_status & MAC_STATUS_RCVD_CFG))
4114                         current_link_up = 1;
4115         } else {
4116                 tg3_setup_flow_control(tp, 0, 0);
4117
4118                 /* Forcing 1000FD link up. */
4119                 current_link_up = 1;
4120
4121                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4122                 udelay(40);
4123
4124                 tw32_f(MAC_MODE, tp->mac_mode);
4125                 udelay(40);
4126         }
4127
4128 out:
4129         return current_link_up;
4130 }
4131
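/* Top-level link setup for TBI (fiber) ports: pick hardware or software
 * autonegotiation, wait for the MAC status to settle, then update the
 * carrier state and link LEDs to match.
 */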
4132 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4133 {
4134         u32 orig_pause_cfg;
4135         u16 orig_active_speed;
4136         u8 orig_active_duplex;
4137         u32 mac_status;
4138         int current_link_up;
4139         int i;
4140
4141         orig_pause_cfg = tp->link_config.active_flowctrl;
4142         orig_active_speed = tp->link_config.active_speed;
4143         orig_active_duplex = tp->link_config.active_duplex;
4144
4145         if (!tg3_flag(tp, HW_AUTONEG) &&
4146             netif_carrier_ok(tp->dev) &&
4147             tg3_flag(tp, INIT_COMPLETE)) {
4148                 mac_status = tr32(MAC_STATUS);
4149                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4150                                MAC_STATUS_SIGNAL_DET |
4151                                MAC_STATUS_CFG_CHANGED |
4152                                MAC_STATUS_RCVD_CFG);
4153                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4154                                    MAC_STATUS_SIGNAL_DET)) {
4155                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4156                                             MAC_STATUS_CFG_CHANGED));
4157                         return 0;
4158                 }
4159         }
4160
4161         tw32_f(MAC_TX_AUTO_NEG, 0);
4162
4163         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4164         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4165         tw32_f(MAC_MODE, tp->mac_mode);
4166         udelay(40);
4167
4168         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4169                 tg3_init_bcm8002(tp);
4170
4171         /* Enable link change events even while polling the serdes. */
4172         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4173         udelay(40);
4174
4175         current_link_up = 0;
4176         mac_status = tr32(MAC_STATUS);
4177
4178         if (tg3_flag(tp, HW_AUTONEG))
4179                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4180         else
4181                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4182
4183         tp->napi[0].hw_status->status =
4184                 (SD_STATUS_UPDATED |
4185                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4186
4187         for (i = 0; i < 100; i++) {
4188                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4189                                     MAC_STATUS_CFG_CHANGED));
4190                 udelay(5);
4191                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4192                                          MAC_STATUS_CFG_CHANGED |
4193                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4194                         break;
4195         }
4196
4197         mac_status = tr32(MAC_STATUS);
4198         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4199                 current_link_up = 0;
4200                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4201                     tp->serdes_counter == 0) {
4202                         tw32_f(MAC_MODE, (tp->mac_mode |
4203                                           MAC_MODE_SEND_CONFIGS));
4204                         udelay(1);
4205                         tw32_f(MAC_MODE, tp->mac_mode);
4206                 }
4207         }
4208
4209         if (current_link_up == 1) {
4210                 tp->link_config.active_speed = SPEED_1000;
4211                 tp->link_config.active_duplex = DUPLEX_FULL;
4212                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4213                                     LED_CTRL_LNKLED_OVERRIDE |
4214                                     LED_CTRL_1000MBPS_ON));
4215         } else {
4216                 tp->link_config.active_speed = SPEED_INVALID;
4217                 tp->link_config.active_duplex = DUPLEX_INVALID;
4218                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4219                                     LED_CTRL_LNKLED_OVERRIDE |
4220                                     LED_CTRL_TRAFFIC_OVERRIDE));
4221         }
4222
4223         if (current_link_up != netif_carrier_ok(tp->dev)) {
4224                 if (current_link_up)
4225                         netif_carrier_on(tp->dev);
4226                 else
4227                         netif_carrier_off(tp->dev);
4228                 tg3_link_report(tp);
4229         } else {
4230                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4231                 if (orig_pause_cfg != now_pause_cfg ||
4232                     orig_active_speed != tp->link_config.active_speed ||
4233                     orig_active_duplex != tp->link_config.active_duplex)
4234                         tg3_link_report(tp);
4235         }
4236
4237         return 0;
4238 }
4239
4240 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4241 {
4242         int current_link_up, err = 0;
4243         u32 bmsr, bmcr;
4244         u16 current_speed;
4245         u8 current_duplex;
4246         u32 local_adv, remote_adv;
4247
4248         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4249         tw32_f(MAC_MODE, tp->mac_mode);
4250         udelay(40);
4251
4252         tw32(MAC_EVENT, 0);
4253
4254         tw32_f(MAC_STATUS,
4255              (MAC_STATUS_SYNC_CHANGED |
4256               MAC_STATUS_CFG_CHANGED |
4257               MAC_STATUS_MI_COMPLETION |
4258               MAC_STATUS_LNKSTATE_CHANGED));
4259         udelay(40);
4260
4261         if (force_reset)
4262                 tg3_phy_reset(tp);
4263
4264         current_link_up = 0;
4265         current_speed = SPEED_INVALID;
4266         current_duplex = DUPLEX_INVALID;
4267
4268         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4269         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4270         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4271                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4272                         bmsr |= BMSR_LSTATUS;
4273                 else
4274                         bmsr &= ~BMSR_LSTATUS;
4275         }
4276
4277         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4278
4279         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4280             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4281                 /* do nothing, just check for link up at the end */
4282         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4283                 u32 adv, new_adv;
4284
4285                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4286                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4287                                   ADVERTISE_1000XPAUSE |
4288                                   ADVERTISE_1000XPSE_ASYM |
4289                                   ADVERTISE_SLCT);
4290
4291                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4292
4293                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4294                         new_adv |= ADVERTISE_1000XHALF;
4295                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4296                         new_adv |= ADVERTISE_1000XFULL;
4297
4298                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4299                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4300                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4301                         tg3_writephy(tp, MII_BMCR, bmcr);
4302
4303                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4304                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4305                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4306
4307                         return err;
4308                 }
4309         } else {
4310                 u32 new_bmcr;
4311
4312                 bmcr &= ~BMCR_SPEED1000;
4313                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4314
4315                 if (tp->link_config.duplex == DUPLEX_FULL)
4316                         new_bmcr |= BMCR_FULLDPLX;
4317
4318                 if (new_bmcr != bmcr) {
4319                         /* BMCR_SPEED1000 is a reserved bit that needs
4320                          * to be set on write.
4321                          */
4322                         new_bmcr |= BMCR_SPEED1000;
4323
4324                         /* Force a linkdown */
4325                         if (netif_carrier_ok(tp->dev)) {
4326                                 u32 adv;
4327
4328                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4329                                 adv &= ~(ADVERTISE_1000XFULL |
4330                                          ADVERTISE_1000XHALF |
4331                                          ADVERTISE_SLCT);
4332                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4333                                 tg3_writephy(tp, MII_BMCR, bmcr |
4334                                                            BMCR_ANRESTART |
4335                                                            BMCR_ANENABLE);
4336                                 udelay(10);
4337                                 netif_carrier_off(tp->dev);
4338                         }
4339                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4340                         bmcr = new_bmcr;
4341                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4342                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4343                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4344                             ASIC_REV_5714) {
4345                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4346                                         bmsr |= BMSR_LSTATUS;
4347                                 else
4348                                         bmsr &= ~BMSR_LSTATUS;
4349                         }
4350                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4351                 }
4352         }
4353
4354         if (bmsr & BMSR_LSTATUS) {
4355                 current_speed = SPEED_1000;
4356                 current_link_up = 1;
4357                 if (bmcr & BMCR_FULLDPLX)
4358                         current_duplex = DUPLEX_FULL;
4359                 else
4360                         current_duplex = DUPLEX_HALF;
4361
4362                 local_adv = 0;
4363                 remote_adv = 0;
4364
4365                 if (bmcr & BMCR_ANENABLE) {
4366                         u32 common;
4367
4368                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4369                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4370                         common = local_adv & remote_adv;
4371                         if (common & (ADVERTISE_1000XHALF |
4372                                       ADVERTISE_1000XFULL)) {
4373                                 if (common & ADVERTISE_1000XFULL)
4374                                         current_duplex = DUPLEX_FULL;
4375                                 else
4376                                         current_duplex = DUPLEX_HALF;
4377                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4378                                 /* Link is up via parallel detect */
4379                         } else {
4380                                 current_link_up = 0;
4381                         }
4382                 }
4383         }
4384
4385         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4386                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4387
4388         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4389         if (tp->link_config.active_duplex == DUPLEX_HALF)
4390                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4391
4392         tw32_f(MAC_MODE, tp->mac_mode);
4393         udelay(40);
4394
4395         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4396
4397         tp->link_config.active_speed = current_speed;
4398         tp->link_config.active_duplex = current_duplex;
4399
4400         if (current_link_up != netif_carrier_ok(tp->dev)) {
4401                 if (current_link_up)
4402                         netif_carrier_on(tp->dev);
4403                 else {
4404                         netif_carrier_off(tp->dev);
4405                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4406                 }
4407                 tg3_link_report(tp);
4408         }
4409         return err;
4410 }
4411
4412 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4413 {
4414         if (tp->serdes_counter) {
4415                 /* Give autoneg time to complete. */
4416                 tp->serdes_counter--;
4417                 return;
4418         }
4419
4420         if (!netif_carrier_ok(tp->dev) &&
4421             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4422                 u32 bmcr;
4423
4424                 tg3_readphy(tp, MII_BMCR, &bmcr);
4425                 if (bmcr & BMCR_ANENABLE) {
4426                         u32 phy1, phy2;
4427
4428                         /* Select shadow register 0x1f */
4429                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4430                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4431
4432                         /* Select expansion interrupt status register */
4433                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4434                                          MII_TG3_DSP_EXP1_INT_STAT);
4435                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4436                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4437
4438                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4439                         /* We have signal detect and are not receiving
4440                          * config code words, so the link is up by
4441                          * parallel detection.
4442                                  */
4443
4444                                 bmcr &= ~BMCR_ANENABLE;
4445                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4446                                 tg3_writephy(tp, MII_BMCR, bmcr);
4447                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4448                         }
4449                 }
4450         } else if (netif_carrier_ok(tp->dev) &&
4451                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4452                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4453                 u32 phy2;
4454
4455                 /* Select expansion interrupt status register */
4456                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4457                                  MII_TG3_DSP_EXP1_INT_STAT);
4458                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4459                 if (phy2 & 0x20) {
4460                         u32 bmcr;
4461
4462                         /* Config code words received, turn on autoneg. */
4463                         tg3_readphy(tp, MII_BMCR, &bmcr);
4464                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4465
4466                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4467
4468                 }
4469         }
4470 }
4471
4472 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4473 {
4474         u32 val;
4475         int err;
4476
4477         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4478                 err = tg3_setup_fiber_phy(tp, force_reset);
4479         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4480                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4481         else
4482                 err = tg3_setup_copper_phy(tp, force_reset);
4483
4484         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4485                 u32 scale;
4486
4487                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4488                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4489                         scale = 65;
4490                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4491                         scale = 6;
4492                 else
4493                         scale = 12;
4494
4495                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4496                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4497                 tw32(GRC_MISC_CFG, val);
4498         }
4499
4500         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4501               (6 << TX_LENGTHS_IPG_SHIFT);
4502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4503                 val |= tr32(MAC_TX_LENGTHS) &
4504                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4505                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4506
4507         if (tp->link_config.active_speed == SPEED_1000 &&
4508             tp->link_config.active_duplex == DUPLEX_HALF)
4509                 tw32(MAC_TX_LENGTHS, val |
4510                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4511         else
4512                 tw32(MAC_TX_LENGTHS, val |
4513                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4514
4515         if (!tg3_flag(tp, 5705_PLUS)) {
4516                 if (netif_carrier_ok(tp->dev)) {
4517                         tw32(HOSTCC_STAT_COAL_TICKS,
4518                              tp->coal.stats_block_coalesce_usecs);
4519                 } else {
4520                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4521                 }
4522         }
4523
4524         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4525                 val = tr32(PCIE_PWR_MGMT_THRESH);
4526                 if (!netif_carrier_ok(tp->dev))
4527                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4528                               tp->pwrmgmt_thresh;
4529                 else
4530                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4531                 tw32(PCIE_PWR_MGMT_THRESH, val);
4532         }
4533
4534         return err;
4535 }
4536
4537 static inline int tg3_irq_sync(struct tg3 *tp)
4538 {
4539         return tp->irq_sync;
4540 }
4541
4542 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4543 {
4544         int i;
4545
4546         dst = (u32 *)((u8 *)dst + off);
4547         for (i = 0; i < len; i += sizeof(u32))
4548                 *dst++ = tr32(off + i);
4549 }
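
/* Illustrative, hypothetical rewrite of tg3_rd32_loop() above, showing
 * the indexing it performs: because dst is first advanced by off bytes,
 * the dump buffer ends up indexed by register offset, which is why
 * tg3_dump_legacy_regs() below can pass the same buffer for every
 * register block.  Not used by the driver; a sketch only.
 */
static inline void tg3_rd32_loop_indexed(struct tg3 *tp, u32 *regs_base,
                                         u32 off, u32 len)
{
        u32 i;

        for (i = 0; i < len; i += sizeof(u32))
                regs_base[(off + i) / sizeof(u32)] = tr32(off + i);
}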
4550
4551 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4552 {
4553         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4554         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4555         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4556         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4557         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4558         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4559         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4560         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4561         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4562         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4563         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4564         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4565         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4566         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4567         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4568         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4569         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4570         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4571         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4572
4573         if (tg3_flag(tp, SUPPORT_MSIX))
4574                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4575
4576         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4577         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4578         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4579         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4580         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4581         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4582         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4583         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4584
4585         if (!tg3_flag(tp, 5705_PLUS)) {
4586                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4587                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4588                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4589         }
4590
4591         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4592         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4593         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4594         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4595         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4596
4597         if (tg3_flag(tp, NVRAM))
4598                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4599 }
4600
4601 static void tg3_dump_state(struct tg3 *tp)
4602 {
4603         int i;
4604         u32 *regs;
4605
4606         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4607         if (!regs) {
4608                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4609                 return;
4610         }
4611
4612         if (tg3_flag(tp, PCI_EXPRESS)) {
4613                 /* Read up to but not including private PCI registers */
4614                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4615                         regs[i / sizeof(u32)] = tr32(i);
4616         } else
4617                 tg3_dump_legacy_regs(tp, regs);
4618
4619         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4620                 if (!regs[i + 0] && !regs[i + 1] &&
4621                     !regs[i + 2] && !regs[i + 3])
4622                         continue;
4623
4624                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4625                            i * 4,
4626                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4627         }
4628
4629         kfree(regs);
4630
4631         for (i = 0; i < tp->irq_cnt; i++) {
4632                 struct tg3_napi *tnapi = &tp->napi[i];
4633
4634                 /* SW status block */
4635                 netdev_err(tp->dev,
4636                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4637                            i,
4638                            tnapi->hw_status->status,
4639                            tnapi->hw_status->status_tag,
4640                            tnapi->hw_status->rx_jumbo_consumer,
4641                            tnapi->hw_status->rx_consumer,
4642                            tnapi->hw_status->rx_mini_consumer,
4643                            tnapi->hw_status->idx[0].rx_producer,
4644                            tnapi->hw_status->idx[0].tx_consumer);
4645
4646                 netdev_err(tp->dev,
4647                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4648                            i,
4649                            tnapi->last_tag, tnapi->last_irq_tag,
4650                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4651                            tnapi->rx_rcb_ptr,
4652                            tnapi->prodring.rx_std_prod_idx,
4653                            tnapi->prodring.rx_std_cons_idx,
4654                            tnapi->prodring.rx_jmb_prod_idx,
4655                            tnapi->prodring.rx_jmb_cons_idx);
4656         }
4657 }
4658
4659 /* This is called whenever we suspect that the system chipset is re-
4660  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4661  * is bogus tx completions. We try to recover by setting the
4662  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4663  * in the workqueue.
4664  */
4665 static void tg3_tx_recover(struct tg3 *tp)
4666 {
4667         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4668                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4669
4670         netdev_warn(tp->dev,
4671                     "The system may be re-ordering memory-mapped I/O "
4672                     "cycles to the network device, attempting to recover. "
4673                     "Please report the problem to the driver maintainer "
4674                     "and include system chipset information.\n");
4675
4676         spin_lock(&tp->lock);
4677         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4678         spin_unlock(&tp->lock);
4679 }
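
/* The TX_RECOVERY_PENDING flag set above is consumed by the NAPI poll
 * loops further down (tg3_poll() and tg3_poll_msix()): they stop
 * processing packets and schedule_work(&tp->reset_task) so the chip
 * reset happens in process context.
 */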
4680
4681 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4682 {
4683         /* Tell compiler to fetch tx indices from memory. */
4684         barrier();
4685         return tnapi->tx_pending -
4686                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4687 }
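
/* Worked example of the ring arithmetic above (a hypothetical helper,
 * not used by the driver): the subtraction is performed modulo the
 * ring size, so it remains correct after tx_prod wraps past tx_cons.
 * With a 512-entry ring, prod = 5 and cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors still in flight.
 */
static inline u32 tg3_tx_in_flight_example(u32 prod, u32 cons)
{
        return (prod - cons) & (TG3_TX_RING_SIZE - 1);
}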
4688
4689 /* Tigon3 never reports partial packet sends.  So we do not
4690  * need special logic to handle SKBs that have not had all
4691  * of their frags sent yet, like SunGEM does.
4692  */
4693 static void tg3_tx(struct tg3_napi *tnapi)
4694 {
4695         struct tg3 *tp = tnapi->tp;
4696         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4697         u32 sw_idx = tnapi->tx_cons;
4698         struct netdev_queue *txq;
4699         int index = tnapi - tp->napi;
4700
4701         if (tg3_flag(tp, ENABLE_TSS))
4702                 index--;
4703
4704         txq = netdev_get_tx_queue(tp->dev, index);
4705
4706         while (sw_idx != hw_idx) {
4707                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4708                 struct sk_buff *skb = ri->skb;
4709                 int i, tx_bug = 0;
4710
4711                 if (unlikely(skb == NULL)) {
4712                         tg3_tx_recover(tp);
4713                         return;
4714                 }
4715
4716                 pci_unmap_single(tp->pdev,
4717                                  dma_unmap_addr(ri, mapping),
4718                                  skb_headlen(skb),
4719                                  PCI_DMA_TODEVICE);
4720
4721                 ri->skb = NULL;
4722
4723                 sw_idx = NEXT_TX(sw_idx);
4724
4725                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4726                         ri = &tnapi->tx_buffers[sw_idx];
4727                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4728                                 tx_bug = 1;
4729
4730                         pci_unmap_page(tp->pdev,
4731                                        dma_unmap_addr(ri, mapping),
4732                                        skb_shinfo(skb)->frags[i].size,
4733                                        PCI_DMA_TODEVICE);
4734                         sw_idx = NEXT_TX(sw_idx);
4735                 }
4736
4737                 dev_kfree_skb(skb);
4738
4739                 if (unlikely(tx_bug)) {
4740                         tg3_tx_recover(tp);
4741                         return;
4742                 }
4743         }
4744
4745         tnapi->tx_cons = sw_idx;
4746
4747         /* Need to make the tx_cons update visible to tg3_start_xmit()
4748          * before checking for netif_queue_stopped().  Without the
4749          * memory barrier, there is a small possibility that tg3_start_xmit()
4750          * will miss it and cause the queue to be stopped forever.
4751          */
4752         smp_mb();
4753
4754         if (unlikely(netif_tx_queue_stopped(txq) &&
4755                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4756                 __netif_tx_lock(txq, smp_processor_id());
4757                 if (netif_tx_queue_stopped(txq) &&
4758                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4759                         netif_tx_wake_queue(txq);
4760                 __netif_tx_unlock(txq);
4761         }
4762 }
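
/* The smp_mb() in tg3_tx() pairs with a mirror-image sequence on the
 * producer side.  A minimal sketch of that pairing, assuming the
 * queue-stop logic used by tg3_start_xmit() (see that function for
 * the authoritative version):
 *
 *      netif_tx_stop_queue(txq);
 *      smp_mb();
 *      if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *              netif_tx_wake_queue(txq);
 *
 * Either the producer sees the new tx_cons and wakes itself, or
 * tg3_tx() sees the stopped queue and wakes it.
 */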
4763
4764 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4765 {
4766         if (!ri->skb)
4767                 return;
4768
4769         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4770                          map_sz, PCI_DMA_FROMDEVICE);
4771         dev_kfree_skb_any(ri->skb);
4772         ri->skb = NULL;
4773 }
4774
4775 /* Returns size of skb allocated or < 0 on error.
4776  *
4777  * We only need to fill in the address because the other members
4778  * of the RX descriptor are invariant, see tg3_init_rings.
4779  *
4780  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4781  * posting buffers we only dirty the first cache line of the RX
4782  * descriptor (containing the address), whereas for the RX status
4783  * buffers the cpu only reads the last cacheline of the RX descriptor
4784  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4785  */
4786 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4787                             u32 opaque_key, u32 dest_idx_unmasked)
4788 {
4789         struct tg3_rx_buffer_desc *desc;
4790         struct ring_info *map;
4791         struct sk_buff *skb;
4792         dma_addr_t mapping;
4793         int skb_size, dest_idx;
4794
4795         switch (opaque_key) {
4796         case RXD_OPAQUE_RING_STD:
4797                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4798                 desc = &tpr->rx_std[dest_idx];
4799                 map = &tpr->rx_std_buffers[dest_idx];
4800                 skb_size = tp->rx_pkt_map_sz;
4801                 break;
4802
4803         case RXD_OPAQUE_RING_JUMBO:
4804                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4805                 desc = &tpr->rx_jmb[dest_idx].std;
4806                 map = &tpr->rx_jmb_buffers[dest_idx];
4807                 skb_size = TG3_RX_JMB_MAP_SZ;
4808                 break;
4809
4810         default:
4811                 return -EINVAL;
4812         }
4813
4814         /* Do not overwrite any of the map or descriptor information
4815          * until we are sure we can commit to a new buffer.
4816          *
4817          * Callers depend upon this behavior and assume that
4818          * we leave everything unchanged if we fail.
4819          */
4820         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4821         if (skb == NULL)
4822                 return -ENOMEM;
4823
4824         skb_reserve(skb, tp->rx_offset);
4825
4826         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4827                                  PCI_DMA_FROMDEVICE);
4828         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4829                 dev_kfree_skb(skb);
4830                 return -EIO;
4831         }
4832
4833         map->skb = skb;
4834         dma_unmap_addr_set(map, mapping, mapping);
4835
4836         desc->addr_hi = ((u64)mapping >> 32);
4837         desc->addr_lo = ((u64)mapping & 0xffffffff);
4838
4839         return skb_size;
4840 }
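
/* Illustrative sketch (a hypothetical helper, not part of the driver):
 * reassembling the DMA address exactly as the descriptor write above
 * split it into 32-bit halves.
 */
static inline u64 tg3_rx_desc_addr_example(const struct tg3_rx_buffer_desc *desc)
{
        return ((u64)desc->addr_hi << 32) | desc->addr_lo;
}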
4841
4842 /* We only need to copy the address over because the other
4843  * members of the RX descriptor are invariant.  See notes above
4844  * tg3_alloc_rx_skb for full details.
4845  */
4846 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4847                            struct tg3_rx_prodring_set *dpr,
4848                            u32 opaque_key, int src_idx,
4849                            u32 dest_idx_unmasked)
4850 {
4851         struct tg3 *tp = tnapi->tp;
4852         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4853         struct ring_info *src_map, *dest_map;
4854         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4855         int dest_idx;
4856
4857         switch (opaque_key) {
4858         case RXD_OPAQUE_RING_STD:
4859                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4860                 dest_desc = &dpr->rx_std[dest_idx];
4861                 dest_map = &dpr->rx_std_buffers[dest_idx];
4862                 src_desc = &spr->rx_std[src_idx];
4863                 src_map = &spr->rx_std_buffers[src_idx];
4864                 break;
4865
4866         case RXD_OPAQUE_RING_JUMBO:
4867                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4868                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4869                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4870                 src_desc = &spr->rx_jmb[src_idx].std;
4871                 src_map = &spr->rx_jmb_buffers[src_idx];
4872                 break;
4873
4874         default:
4875                 return;
4876         }
4877
4878         dest_map->skb = src_map->skb;
4879         dma_unmap_addr_set(dest_map, mapping,
4880                            dma_unmap_addr(src_map, mapping));
4881         dest_desc->addr_hi = src_desc->addr_hi;
4882         dest_desc->addr_lo = src_desc->addr_lo;
4883
4884         /* Ensure that the update to the skb happens after the physical
4885          * addresses have been transferred to the new BD location.
4886          */
4887         smp_wmb();
4888
4889         src_map->skb = NULL;
4890 }
4891
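/* The smp_wmb() above pairs with the smp_rmb() barriers in
 * tg3_rx_prodring_xfer() below: a consumer that observes the NULL
 * skb store must also observe the address transfer that preceded it,
 * so it never copies a stale mapping.
 */
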
4892 /* The RX ring scheme is composed of multiple rings which post fresh
4893  * buffers to the chip, and one special ring the chip uses to report
4894  * status back to the host.
4895  *
4896  * The special ring reports the status of received packets to the
4897  * host.  The chip does not write into the original descriptor the
4898  * RX buffer was obtained from.  The chip simply takes the original
4899  * descriptor as provided by the host, updates the status and length
4900  * field, then writes this into the next status ring entry.
4901  *
4902  * Each ring the host uses to post buffers to the chip is described
4903  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4904  * it is first placed into on-chip RAM.  When the packet's length
4905  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4906  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4907  * whose MAXLEN covers the new packet's length is chosen.
4908  *
4909  * The "separate ring for rx status" scheme may sound odd, but it makes
4910  * sense from a cache coherency perspective.  If only the host writes
4911  * to the buffer post rings, and only the chip writes to the rx status
4912  * rings, then cache lines never move beyond shared-modified state.
4913  * If both the host and chip were to write into the same ring, cache line
4914  * eviction could occur since both entities want it in an exclusive state.
4915  */
4916 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4917 {
4918         struct tg3 *tp = tnapi->tp;
4919         u32 work_mask, rx_std_posted = 0;
4920         u32 std_prod_idx, jmb_prod_idx;
4921         u32 sw_idx = tnapi->rx_rcb_ptr;
4922         u16 hw_idx;
4923         int received;
4924         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4925
4926         hw_idx = *(tnapi->rx_rcb_prod_idx);
4927         /*
4928          * We need to order the read of hw_idx and the read of
4929          * the opaque cookie.
4930          */
4931         rmb();
4932         work_mask = 0;
4933         received = 0;
4934         std_prod_idx = tpr->rx_std_prod_idx;
4935         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4936         while (sw_idx != hw_idx && budget > 0) {
4937                 struct ring_info *ri;
4938                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4939                 unsigned int len;
4940                 struct sk_buff *skb;
4941                 dma_addr_t dma_addr;
4942                 u32 opaque_key, desc_idx, *post_ptr;
4943
4944                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4945                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4946                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4947                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4948                         dma_addr = dma_unmap_addr(ri, mapping);
4949                         skb = ri->skb;
4950                         post_ptr = &std_prod_idx;
4951                         rx_std_posted++;
4952                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4953                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4954                         dma_addr = dma_unmap_addr(ri, mapping);
4955                         skb = ri->skb;
4956                         post_ptr = &jmb_prod_idx;
4957                 } else
4958                         goto next_pkt_nopost;
4959
4960                 work_mask |= opaque_key;
4961
4962                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4963                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4964                 drop_it:
4965                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4966                                        desc_idx, *post_ptr);
4967                 drop_it_no_recycle:
4968                         /* Other statistics are kept track of by the card. */
4969                         tp->rx_dropped++;
4970                         goto next_pkt;
4971                 }
4972
4973                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4974                       ETH_FCS_LEN;
4975
4976                 if (len > TG3_RX_COPY_THRESH(tp)) {
4977                         int skb_size;
4978
4979                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4980                                                     *post_ptr);
4981                         if (skb_size < 0)
4982                                 goto drop_it;
4983
4984                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4985                                          PCI_DMA_FROMDEVICE);
4986
4987                         /* Ensure that the update to the skb happens
4988                          * after the usage of the old DMA mapping.
4989                          */
4990                         smp_wmb();
4991
4992                         ri->skb = NULL;
4993
4994                         skb_put(skb, len);
4995                 } else {
4996                         struct sk_buff *copy_skb;
4997
4998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4999                                        desc_idx, *post_ptr);
5000
5001                         copy_skb = netdev_alloc_skb(tp->dev, len +
5002                                                     TG3_RAW_IP_ALIGN);
5003                         if (copy_skb == NULL)
5004                                 goto drop_it_no_recycle;
5005
5006                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5007                         skb_put(copy_skb, len);
5008                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5009                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5010                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5011
5012                         /* We'll reuse the original ring buffer. */
5013                         skb = copy_skb;
5014                 }
5015
5016                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5017                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5018                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5019                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5020                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5021                 else
5022                         skb_checksum_none_assert(skb);
5023
5024                 skb->protocol = eth_type_trans(skb, tp->dev);
5025
5026                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5027                     skb->protocol != htons(ETH_P_8021Q)) {
5028                         dev_kfree_skb(skb);
5029                         goto drop_it_no_recycle;
5030                 }
5031
5032                 if (desc->type_flags & RXD_FLAG_VLAN &&
5033                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5034                         __vlan_hwaccel_put_tag(skb,
5035                                                desc->err_vlan & RXD_VLAN_MASK);
5036
5037                 napi_gro_receive(&tnapi->napi, skb);
5038
5039                 received++;
5040                 budget--;
5041
5042 next_pkt:
5043                 (*post_ptr)++;
5044
5045                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5046                         tpr->rx_std_prod_idx = std_prod_idx &
5047                                                tp->rx_std_ring_mask;
5048                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5049                                      tpr->rx_std_prod_idx);
5050                         work_mask &= ~RXD_OPAQUE_RING_STD;
5051                         rx_std_posted = 0;
5052                 }
5053 next_pkt_nopost:
5054                 sw_idx++;
5055                 sw_idx &= tp->rx_ret_ring_mask;
5056
5057                 /* Refresh hw_idx to see if there is new work */
5058                 if (sw_idx == hw_idx) {
5059                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5060                         rmb();
5061                 }
5062         }
5063
5064         /* ACK the status ring. */
5065         tnapi->rx_rcb_ptr = sw_idx;
5066         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5067
5068         /* Refill RX ring(s). */
5069         if (!tg3_flag(tp, ENABLE_RSS)) {
5070                 if (work_mask & RXD_OPAQUE_RING_STD) {
5071                         tpr->rx_std_prod_idx = std_prod_idx &
5072                                                tp->rx_std_ring_mask;
5073                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5074                                      tpr->rx_std_prod_idx);
5075                 }
5076                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5077                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5078                                                tp->rx_jmb_ring_mask;
5079                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5080                                      tpr->rx_jmb_prod_idx);
5081                 }
5082                 mmiowb();
5083         } else if (work_mask) {
5084                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5085                  * updated before the producer indices can be updated.
5086                  */
5087                 smp_wmb();
5088
5089                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5090                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5091
5092                 if (tnapi != &tp->napi[1])
5093                         napi_schedule(&tp->napi[1].napi);
5094         }
5095
5096         return received;
5097 }
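
/* Note the two receive paths above: frames longer than
 * TG3_RX_COPY_THRESH(tp) hand their DMA buffer straight up the stack
 * and post a freshly allocated replacement, while shorter frames are
 * copied into a small skb so the original ring buffer can be recycled
 * in place.
 */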
5098
5099 static void tg3_poll_link(struct tg3 *tp)
5100 {
5101         /* handle link change and other phy events */
5102         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5103                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5104
5105                 if (sblk->status & SD_STATUS_LINK_CHG) {
5106                         sblk->status = SD_STATUS_UPDATED |
5107                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5108                         spin_lock(&tp->lock);
5109                         if (tg3_flag(tp, USE_PHYLIB)) {
5110                                 tw32_f(MAC_STATUS,
5111                                      (MAC_STATUS_SYNC_CHANGED |
5112                                       MAC_STATUS_CFG_CHANGED |
5113                                       MAC_STATUS_MI_COMPLETION |
5114                                       MAC_STATUS_LNKSTATE_CHANGED));
5115                                 udelay(40);
5116                         } else
5117                                 tg3_setup_phy(tp, 0);
5118                         spin_unlock(&tp->lock);
5119                 }
5120         }
5121 }
5122
5123 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5124                                 struct tg3_rx_prodring_set *dpr,
5125                                 struct tg3_rx_prodring_set *spr)
5126 {
5127         u32 si, di, cpycnt, src_prod_idx;
5128         int i, err = 0;
5129
5130         while (1) {
5131                 src_prod_idx = spr->rx_std_prod_idx;
5132
5133                 /* Make sure updates to the rx_std_buffers[] entries and the
5134                  * standard producer index are seen in the correct order.
5135                  */
5136                 smp_rmb();
5137
5138                 if (spr->rx_std_cons_idx == src_prod_idx)
5139                         break;
5140
5141                 if (spr->rx_std_cons_idx < src_prod_idx)
5142                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5143                 else
5144                         cpycnt = tp->rx_std_ring_mask + 1 -
5145                                  spr->rx_std_cons_idx;
5146
5147                 cpycnt = min(cpycnt,
5148                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5149
5150                 si = spr->rx_std_cons_idx;
5151                 di = dpr->rx_std_prod_idx;
5152
5153                 for (i = di; i < di + cpycnt; i++) {
5154                         if (dpr->rx_std_buffers[i].skb) {
5155                                 cpycnt = i - di;
5156                                 err = -ENOSPC;
5157                                 break;
5158                         }
5159                 }
5160
5161                 if (!cpycnt)
5162                         break;
5163
5164                 /* Ensure that updates to the rx_std_buffers ring and the
5165                  * shadowed hardware producer ring from tg3_recycle_rx() are
5166                  * ordered correctly WRT the skb check above.
5167                  */
5168                 smp_rmb();
5169
5170                 memcpy(&dpr->rx_std_buffers[di],
5171                        &spr->rx_std_buffers[si],
5172                        cpycnt * sizeof(struct ring_info));
5173
5174                 for (i = 0; i < cpycnt; i++, di++, si++) {
5175                         struct tg3_rx_buffer_desc *sbd, *dbd;
5176                         sbd = &spr->rx_std[si];
5177                         dbd = &dpr->rx_std[di];
5178                         dbd->addr_hi = sbd->addr_hi;
5179                         dbd->addr_lo = sbd->addr_lo;
5180                 }
5181
5182                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5183                                        tp->rx_std_ring_mask;
5184                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5185                                        tp->rx_std_ring_mask;
5186         }
5187
5188         while (1) {
5189                 src_prod_idx = spr->rx_jmb_prod_idx;
5190
5191                 /* Make sure updates to the rx_jmb_buffers[] entries and
5192                  * the jumbo producer index are seen in the correct order.
5193                  */
5194                 smp_rmb();
5195
5196                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5197                         break;
5198
5199                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5200                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5201                 else
5202                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5203                                  spr->rx_jmb_cons_idx;
5204
5205                 cpycnt = min(cpycnt,
5206                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5207
5208                 si = spr->rx_jmb_cons_idx;
5209                 di = dpr->rx_jmb_prod_idx;
5210
5211                 for (i = di; i < di + cpycnt; i++) {
5212                         if (dpr->rx_jmb_buffers[i].skb) {
5213                                 cpycnt = i - di;
5214                                 err = -ENOSPC;
5215                                 break;
5216                         }
5217                 }
5218
5219                 if (!cpycnt)
5220                         break;
5221
5222                 /* Ensure that updates to the rx_jmb_buffers ring and the
5223                  * shadowed hardware producer ring from tg3_recycle_rx() are
5224                  * ordered correctly WRT the skb check above.
5225                  */
5226                 smp_rmb();
5227
5228                 memcpy(&dpr->rx_jmb_buffers[di],
5229                        &spr->rx_jmb_buffers[si],
5230                        cpycnt * sizeof(struct ring_info));
5231
5232                 for (i = 0; i < cpycnt; i++, di++, si++) {
5233                         struct tg3_rx_buffer_desc *sbd, *dbd;
5234                         sbd = &spr->rx_jmb[si].std;
5235                         dbd = &dpr->rx_jmb[di].std;
5236                         dbd->addr_hi = sbd->addr_hi;
5237                         dbd->addr_lo = sbd->addr_lo;
5238                 }
5239
5240                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5241                                        tp->rx_jmb_ring_mask;
5242                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5243                                        tp->rx_jmb_ring_mask;
5244         }
5245
5246         return err;
5247 }
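
/* Worked example of the cpycnt computation above (illustrative only,
 * ignoring the destination-space clamp and the occupied-slot scan):
 * with a 512-entry standard ring (mask 511), cons_idx = 500 and
 * prod_idx = 10, the source run wraps, so the first pass copies
 * mask + 1 - cons_idx = 12 entries and the outer while loop comes
 * around for the remaining 10.
 */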
5248
5249 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5250 {
5251         struct tg3 *tp = tnapi->tp;
5252
5253         /* run TX completion thread */
5254         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5255                 tg3_tx(tnapi);
5256                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5257                         return work_done;
5258         }
5259
5260         /* run RX thread, within the bounds set by NAPI.
5261          * All RX "locking" is done by ensuring outside
5262          * code synchronizes with tg3->napi.poll()
5263          */
5264         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5265                 work_done += tg3_rx(tnapi, budget - work_done);
5266
5267         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5268                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5269                 int i, err = 0;
5270                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5271                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5272
5273                 for (i = 1; i < tp->irq_cnt; i++)
5274                         err |= tg3_rx_prodring_xfer(tp, dpr,
5275                                                     &tp->napi[i].prodring);
5276
5277                 wmb();
5278
5279                 if (std_prod_idx != dpr->rx_std_prod_idx)
5280                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5281                                      dpr->rx_std_prod_idx);
5282
5283                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5284                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5285                                      dpr->rx_jmb_prod_idx);
5286
5287                 mmiowb();
5288
5289                 if (err)
5290                         tw32_f(HOSTCC_MODE, tp->coal_now);
5291         }
5292
5293         return work_done;
5294 }
5295
5296 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5297 {
5298         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5299         struct tg3 *tp = tnapi->tp;
5300         int work_done = 0;
5301         struct tg3_hw_status *sblk = tnapi->hw_status;
5302
5303         while (1) {
5304                 work_done = tg3_poll_work(tnapi, work_done, budget);
5305
5306                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5307                         goto tx_recovery;
5308
5309                 if (unlikely(work_done >= budget))
5310                         break;
5311
5312                 /* tnapi->last_tag is used when re-enabling interrupts below
5313                  * to tell the hw how much work has been processed,
5314                  * so we must read it before checking for more work.
5315                  */
5316                 tnapi->last_tag = sblk->status_tag;
5317                 tnapi->last_irq_tag = tnapi->last_tag;
5318                 rmb();
5319
5320                 /* check for RX/TX work to do */
5321                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5322                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5323                         napi_complete(napi);
5324                         /* Reenable interrupts. */
5325                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5326                         mmiowb();
5327                         break;
5328                 }
5329         }
5330
5331         return work_done;
5332
5333 tx_recovery:
5334         /* work_done is guaranteed to be less than budget. */
5335         napi_complete(napi);
5336         schedule_work(&tp->reset_task);
5337         return work_done;
5338 }
5339
5340 static void tg3_process_error(struct tg3 *tp)
5341 {
5342         u32 val;
5343         bool real_error = false;
5344
5345         if (tg3_flag(tp, ERROR_PROCESSED))
5346                 return;
5347
5348         /* Check Flow Attention register */
5349         val = tr32(HOSTCC_FLOW_ATTN);
5350         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5351                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5352                 real_error = true;
5353         }
5354
5355         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5356                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5357                 real_error = true;
5358         }
5359
5360         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5361                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5362                 real_error = true;
5363         }
5364
5365         if (!real_error)
5366                 return;
5367
5368         tg3_dump_state(tp);
5369
5370         tg3_flag_set(tp, ERROR_PROCESSED);
5371         schedule_work(&tp->reset_task);
5372 }
5373
5374 static int tg3_poll(struct napi_struct *napi, int budget)
5375 {
5376         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5377         struct tg3 *tp = tnapi->tp;
5378         int work_done = 0;
5379         struct tg3_hw_status *sblk = tnapi->hw_status;
5380
5381         while (1) {
5382                 if (sblk->status & SD_STATUS_ERROR)
5383                         tg3_process_error(tp);
5384
5385                 tg3_poll_link(tp);
5386
5387                 work_done = tg3_poll_work(tnapi, work_done, budget);
5388
5389                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390                         goto tx_recovery;
5391
5392                 if (unlikely(work_done >= budget))
5393                         break;
5394
5395                 if (tg3_flag(tp, TAGGED_STATUS)) {
5396                         /* tnapi->last_tag is used in tg3_int_reenable() below
5397                          * to tell the hw how much work has been processed,
5398                          * so we must read it before checking for more work.
5399                          */
5400                         tnapi->last_tag = sblk->status_tag;
5401                         tnapi->last_irq_tag = tnapi->last_tag;
5402                         rmb();
5403                 } else
5404                         sblk->status &= ~SD_STATUS_UPDATED;
5405
5406                 if (likely(!tg3_has_work(tnapi))) {
5407                         napi_complete(napi);
5408                         tg3_int_reenable(tnapi);
5409                         break;
5410                 }
5411         }
5412
5413         return work_done;
5414
5415 tx_recovery:
5416         /* work_done is guaranteed to be less than budget. */
5417         napi_complete(napi);
5418         schedule_work(&tp->reset_task);
5419         return work_done;
5420 }
5421
5422 static void tg3_napi_disable(struct tg3 *tp)
5423 {
5424         int i;
5425
5426         for (i = tp->irq_cnt - 1; i >= 0; i--)
5427                 napi_disable(&tp->napi[i].napi);
5428 }
5429
5430 static void tg3_napi_enable(struct tg3 *tp)
5431 {
5432         int i;
5433
5434         for (i = 0; i < tp->irq_cnt; i++)
5435                 napi_enable(&tp->napi[i].napi);
5436 }
5437
5438 static void tg3_napi_init(struct tg3 *tp)
5439 {
5440         int i;
5441
5442         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5443         for (i = 1; i < tp->irq_cnt; i++)
5444                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5445 }
5446
5447 static void tg3_napi_fini(struct tg3 *tp)
5448 {
5449         int i;
5450
5451         for (i = 0; i < tp->irq_cnt; i++)
5452                 netif_napi_del(&tp->napi[i].napi);
5453 }
5454
5455 static inline void tg3_netif_stop(struct tg3 *tp)
5456 {
5457         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5458         tg3_napi_disable(tp);
5459         netif_tx_disable(tp->dev);
5460 }
5461
5462 static inline void tg3_netif_start(struct tg3 *tp)
5463 {
5464         /* NOTE: unconditional netif_tx_wake_all_queues is only
5465          * appropriate so long as all callers are assured to
5466          * have free tx slots (such as after tg3_init_hw)
5467          */
5468         netif_tx_wake_all_queues(tp->dev);
5469
5470         tg3_napi_enable(tp);
5471         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5472         tg3_enable_ints(tp);
5473 }
5474
5475 static void tg3_irq_quiesce(struct tg3 *tp)
5476 {
5477         int i;
5478
5479         BUG_ON(tp->irq_sync);
5480
5481         tp->irq_sync = 1;
5482         smp_mb();
5483
5484         for (i = 0; i < tp->irq_cnt; i++)
5485                 synchronize_irq(tp->napi[i].irq_vec);
5486 }
5487
5488 /* Fully shut down all tg3 driver activity elsewhere in the system.
5489  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5490  * Most of the time this is not necessary, except when
5491  * shutting down the device.
5492  */
5493 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5494 {
5495         spin_lock_bh(&tp->lock);
5496         if (irq_sync)
5497                 tg3_irq_quiesce(tp);
5498 }
5499
5500 static inline void tg3_full_unlock(struct tg3 *tp)
5501 {
5502         spin_unlock_bh(&tp->lock);
5503 }
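
/* Typical caller pattern for the locking helpers above (a minimal
 * sketch; tg3_halt() and tg3_init_hw() are declared at the end of
 * this section, and RESET_KIND_SHUTDOWN is assumed from elsewhere in
 * the driver):
 *
 *      tg3_full_lock(tp, 1);
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *      tg3_init_hw(tp, 1);
 *      tg3_full_unlock(tp);
 */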
5504
5505 /* One-shot MSI handler - the chip automatically disables the
5506  * interrupt after sending the MSI, so the driver doesn't have to.
5507  */
5508 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5509 {
5510         struct tg3_napi *tnapi = dev_id;
5511         struct tg3 *tp = tnapi->tp;
5512
5513         prefetch(tnapi->hw_status);
5514         if (tnapi->rx_rcb)
5515                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5516
5517         if (likely(!tg3_irq_sync(tp)))
5518                 napi_schedule(&tnapi->napi);
5519
5520         return IRQ_HANDLED;
5521 }
5522
5523 /* MSI ISR - No need to check for interrupt sharing and no need to
5524  * flush status block and interrupt mailbox. PCI ordering rules
5525  * guarantee that MSI will arrive after the status block.
5526  */
5527 static irqreturn_t tg3_msi(int irq, void *dev_id)
5528 {
5529         struct tg3_napi *tnapi = dev_id;
5530         struct tg3 *tp = tnapi->tp;
5531
5532         prefetch(tnapi->hw_status);
5533         if (tnapi->rx_rcb)
5534                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5535         /*
5536          * Writing any value to intr-mbox-0 clears PCI INTA# and
5537          * chip-internal interrupt pending events.
5538          * Writing non-zero to intr-mbox-0 additionally tells the
5539          * NIC to stop sending us irqs, engaging "in-intr-handler"
5540          * event coalescing.
5541          */
5542         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5543         if (likely(!tg3_irq_sync(tp)))
5544                 napi_schedule(&tnapi->napi);
5545
5546         return IRQ_RETVAL(1);
5547 }
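
/* The non-zero mailbox write above masks further interrupts; the poll
 * loop re-enables them by writing last_tag << 24 back to the same
 * mailbox once the work is drained (see tg3_poll_msix() above and
 * tg3_int_reenable()).
 */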
5548
5549 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5550 {
5551         struct tg3_napi *tnapi = dev_id;
5552         struct tg3 *tp = tnapi->tp;
5553         struct tg3_hw_status *sblk = tnapi->hw_status;
5554         unsigned int handled = 1;
5555
5556         /* In INTx mode, it is possible for the interrupt to arrive at
5557          * the CPU before its freshly posted status block reaches memory.
5558          * Reading the PCI State register will confirm whether the
5559          * interrupt is ours and will flush the status block.
5560          */
5561         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5562                 if (tg3_flag(tp, CHIP_RESETTING) ||
5563                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5564                         handled = 0;
5565                         goto out;
5566                 }
5567         }
5568
5569         /*
5570          * Writing any value to intr-mbox-0 clears PCI INTA# and
5571          * chip-internal interrupt pending events.
5572          * Writing non-zero to intr-mbox-0 additionally tells the
5573          * NIC to stop sending us irqs, engaging "in-intr-handler"
5574          * event coalescing.
5575          *
5576          * Flush the mailbox to de-assert the IRQ immediately to prevent
5577          * spurious interrupts.  The flush impacts performance but
5578          * excessive spurious interrupts can be worse in some cases.
5579          */
5580         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5581         if (tg3_irq_sync(tp))
5582                 goto out;
5583         sblk->status &= ~SD_STATUS_UPDATED;
5584         if (likely(tg3_has_work(tnapi))) {
5585                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5586                 napi_schedule(&tnapi->napi);
5587         } else {
5588                 /* No work, shared interrupt perhaps?  Re-enable
5589                  * interrupts, and flush that PCI write.
5590                  */
5591                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5592                                0x00000000);
5593         }
5594 out:
5595         return IRQ_RETVAL(handled);
5596 }
5597
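/* INTx handler for chips running with tagged status.  A status tag
 * unchanged since the last interrupt marks the interrupt as stale or
 * foreign, without needing to clear SD_STATUS_UPDATED by hand.
 */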
5598 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5599 {
5600         struct tg3_napi *tnapi = dev_id;
5601         struct tg3 *tp = tnapi->tp;
5602         struct tg3_hw_status *sblk = tnapi->hw_status;
5603         unsigned int handled = 1;
5604
5605         /* In INTx mode, it is possible for the interrupt to arrive at
5606          * the CPU before the status block write that was posted ahead
5607          * of it.  Reading the PCI State register will confirm whether
5608          * the interrupt is ours and will flush the status block.
5609          */
5610         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5611                 if (tg3_flag(tp, CHIP_RESETTING) ||
5612                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613                         handled = 0;
5614                         goto out;
5615                 }
5616         }
5617
5618         /*
5619          * Writing any value to intr-mbox-0 clears PCI INTA# and
5620          * chip-internal interrupt pending events.
5621          * Writing non-zero to intr-mbox-0 additionally tells the
5622          * NIC to stop sending us irqs, engaging "in-intr-handler"
5623          * event coalescing.
5624          *
5625          * Flush the mailbox to de-assert the IRQ immediately to prevent
5626          * spurious interrupts.  The flush impacts performance but
5627          * excessive spurious interrupts can be worse in some cases.
5628          */
5629         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5630
5631         /*
5632          * In a shared interrupt configuration, sometimes other devices'
5633          * interrupts will scream.  We record the current status tag here
5634          * so that the above check can report that the screaming interrupts
5635          * are unhandled.  Eventually they will be silenced.
5636          */
5637         tnapi->last_irq_tag = sblk->status_tag;
5638
5639         if (tg3_irq_sync(tp))
5640                 goto out;
5641
5642         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5643
5644         napi_schedule(&tnapi->napi);
5645
5646 out:
5647         return IRQ_RETVAL(handled);
5648 }
5649
5650 /* ISR for interrupt test */
5651 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5652 {
5653         struct tg3_napi *tnapi = dev_id;
5654         struct tg3 *tp = tnapi->tp;
5655         struct tg3_hw_status *sblk = tnapi->hw_status;
5656
5657         if ((sblk->status & SD_STATUS_UPDATED) ||
5658             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5659                 tg3_disable_ints(tp);
5660                 return IRQ_RETVAL(1);
5661         }
5662         return IRQ_RETVAL(0);
5663 }
5664
5665 static int tg3_init_hw(struct tg3 *, int);
5666 static int tg3_halt(struct tg3 *, int, int);
5667
5668 /* Restart hardware after configuration changes, self-test, etc.
5669  * Invoked with tp->lock held.
5670  */
5671 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5672         __releases(tp->lock)
5673         __acquires(tp->lock)
5674 {
5675         int err;
5676
5677         err = tg3_init_hw(tp, reset_phy);
5678         if (err) {
5679                 netdev_err(tp->dev,
5680                            "Failed to re-initialize device, aborting\n");
5681                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5682                 tg3_full_unlock(tp);
5683                 del_timer_sync(&tp->timer);
5684                 tp->irq_sync = 0;
5685                 tg3_napi_enable(tp);
5686                 dev_close(tp->dev);
5687                 tg3_full_lock(tp, 0);
5688         }
5689         return err;
5690 }
5691
5692 #ifdef CONFIG_NET_POLL_CONTROLLER
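/* Netpoll hook: invoked when normal interrupt delivery is unavailable
 * (e.g. netconsole), so fake an interrupt on every vector to process
 * any pending RX/TX work.
 */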
5693 static void tg3_poll_controller(struct net_device *dev)
5694 {
5695         int i;
5696         struct tg3 *tp = netdev_priv(dev);
5697
5698         for (i = 0; i < tp->irq_cnt; i++)
5699                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5700 }
5701 #endif
5702
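/* Deferred reset handler, scheduled from tg3_tx_timeout() and other
 * error paths.  Runs in process context from the shared workqueue so
 * the halt/re-init sequence below is allowed to sleep.
 */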
5703 static void tg3_reset_task(struct work_struct *work)
5704 {
5705         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5706         int err;
5707         unsigned int restart_timer;
5708
5709         tg3_full_lock(tp, 0);
5710
5711         if (!netif_running(tp->dev)) {
5712                 tg3_full_unlock(tp);
5713                 return;
5714         }
5715
5716         tg3_full_unlock(tp);
5717
5718         tg3_phy_stop(tp);
5719
5720         tg3_netif_stop(tp);
5721
5722         tg3_full_lock(tp, 1);
5723
5724         restart_timer = tg3_flag(tp, RESTART_TIMER);
5725         tg3_flag_clear(tp, RESTART_TIMER);
5726
5727         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5728                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5729                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5730                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5731                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5732         }
5733
5734         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5735         err = tg3_init_hw(tp, 1);
5736         if (err)
5737                 goto out;
5738
5739         tg3_netif_start(tp);
5740
5741         if (restart_timer)
5742                 mod_timer(&tp->timer, jiffies + 1);
5743
5744 out:
5745         tg3_full_unlock(tp);
5746
5747         if (!err)
5748                 tg3_phy_start(tp);
5749 }
5750
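/* net_device watchdog hook, called when a TX queue stalls past the
 * watchdog timeout.  The reset itself must sleep, so it is deferred
 * to tg3_reset_task().
 */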
5751 static void tg3_tx_timeout(struct net_device *dev)
5752 {
5753         struct tg3 *tp = netdev_priv(dev);
5754
5755         if (netif_msg_tx_err(tp)) {
5756                 netdev_err(dev, "transmit timed out, resetting\n");
5757                 tg3_dump_state(tp);
5758         }
5759
5760         schedule_work(&tp->reset_task);
5761 }
5762
5763 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5764 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5765 {
5766         u32 base = (u32) mapping & 0xffffffff;
5767
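        /* "base + len + 8 < base" is true only when the 32-bit addition
         * wraps, i.e. the buffer (plus 8 bytes of slack) crosses a 4GB
         * boundary.  "base > 0xffffdcc0" is a fast filter that skips the
         * wrap test for buffers starting well below the boundary.
         * Example: mapping = 0xfffff000, len = 8192 gives
         * 0xfffff000 + 8192 + 8 = 0x1008 (mod 2^32) < base, so it fires.
         */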
5768         return (base > 0xffffdcc0) && (base + len + 8 < base);
5769 }
5770
5771 /* Test for DMA addresses > 40-bit */
5772 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5773                                           int len)
5774 {
5775 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5776         if (tg3_flag(tp, 40BIT_DMA_BUG))
5777                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5778         return 0;
5779 #else
5780         return 0;
5781 #endif
5782 }
5783
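/* Fill one hardware TX descriptor: split the 64-bit DMA address into
 * high/low words and pack the length+flags and VLAN-tag+MSS pairs into
 * their shared descriptor fields.
 */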
5784 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5785                         dma_addr_t mapping, int len, u32 flags,
5786                         u32 mss_and_is_end)
5787 {
5788         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5789         int is_end = (mss_and_is_end & 0x1);
5790         u32 mss = (mss_and_is_end >> 1);
5791         u32 vlan_tag = 0;
5792
5793         if (is_end)
5794                 flags |= TXD_FLAG_END;
5795         if (flags & TXD_FLAG_VLAN) {
5796                 vlan_tag = flags >> 16;
5797                 flags &= 0xffff;
5798         }
5799         vlan_tag |= (mss << TXD_MSS_SHIFT);
5800
5801         txd->addr_hi = ((u64) mapping >> 32);
5802         txd->addr_lo = ((u64) mapping & 0xffffffff);
5803         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5804         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5805 }
5806
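/* Unwind the DMA mappings of an skb whose transmit setup failed:
 * unmap the linear header at tx_prod, then the first 'last' fragments
 * that were already mapped.
 */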
5807 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5808                                 struct sk_buff *skb, int last)
5809 {
5810         int i;
5811         u32 entry = tnapi->tx_prod;
5812         struct ring_info *txb = &tnapi->tx_buffers[entry];
5813
5814         pci_unmap_single(tnapi->tp->pdev,
5815                          dma_unmap_addr(txb, mapping),
5816                          skb_headlen(skb),
5817                          PCI_DMA_TODEVICE);
5818         for (i = 0; i < last; i++) {
5819                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5820
5821                 entry = NEXT_TX(entry);
5822                 txb = &tnapi->tx_buffers[entry];
5823
5824                 pci_unmap_page(tnapi->tp->pdev,
5825                                dma_unmap_addr(txb, mapping),
5826                                frag->size, PCI_DMA_TODEVICE);
5827         }
5828 }
5829
5830 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5831 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5832                                        struct sk_buff *skb,
5833                                        u32 base_flags, u32 mss)
5834 {
5835         struct tg3 *tp = tnapi->tp;
5836         struct sk_buff *new_skb;
5837         dma_addr_t new_addr = 0;
5838         u32 entry = tnapi->tx_prod;
5839         int ret = 0;
5840
5841         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
5842                 new_skb = skb_copy(skb, GFP_ATOMIC);
5843         } else {
5844                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5845
5846                 new_skb = skb_copy_expand(skb,
5847                                           skb_headroom(skb) + more_headroom,
5848                                           skb_tailroom(skb), GFP_ATOMIC);
5849         }
5850
5851         if (!new_skb) {
5852                 ret = -1;
5853         } else {
5854                 /* New SKB is guaranteed to be linear. */
5855                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5856                                           PCI_DMA_TODEVICE);
5857                 /* Make sure the mapping succeeded */
5858                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5859                         ret = -1;
5860                         dev_kfree_skb(new_skb);
5861
5862                 /* Make sure the new skb does not cross any 4G boundaries. */