tg3: use <linux/io.h> and <linux/uaccess.h> instead of <asm/io.h> and <asm/uaccess.h>
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define TG3_MAJ_NUM                     3
67 #define TG3_MIN_NUM                     117
68 #define DRV_MODULE_VERSION      \
69         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70 #define DRV_MODULE_RELDATE      "January 25, 2011"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_STD_RING_SIZE(tp) \
100         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
101           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
102          RX_STD_MAX_SIZE_5717 : 512)
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JMB_RING_SIZE(tp) \
105         ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
106           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
107          1024 : 256)
108 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
109 #define TG3_RSS_INDIR_TBL_SIZE          128
110
111 /* Do not place this n-ring entries value into the tp struct itself,
112  * we really want to expose these constants to GCC so that modulo et
113  * al.  operations are done with shifts and masks instead of with
114  * hw multiply/modulo instructions.  Another solution would be to
115  * replace things like '% foo' with '& (foo - 1)'.
116  */
117
118 #define TG3_TX_RING_SIZE                512
119 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
120
121 #define TG3_RX_STD_RING_BYTES(tp) \
122         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
123 #define TG3_RX_JMB_RING_BYTES(tp) \
124         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
125 #define TG3_RX_RCB_RING_BYTES(tp) \
126         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
127 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
128                                  TG3_TX_RING_SIZE)
129 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130
131 #define TG3_DMA_BYTE_ENAB               64
132
133 #define TG3_RX_STD_DMA_SZ               1536
134 #define TG3_RX_JMB_DMA_SZ               9046
135
136 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
137
138 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
139 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
140
141 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
142         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
143
144 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
145         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
146
147 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
148  * that are at least dword aligned when used in PCIX mode.  The driver
149  * works around this bug by double copying the packet.  This workaround
150  * is built into the normal double copy length check for efficiency.
151  *
152  * However, the double copy is only necessary on those architectures
153  * where unaligned memory accesses are inefficient.  For those architectures
154  * where unaligned memory accesses incur little penalty, we can reintegrate
155  * the 5701 in the normal rx path.  Doing so saves a device structure
156  * dereference by hardcoding the double copy threshold in place.
157  */
158 #define TG3_RX_COPY_THRESHOLD           256
159 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
160         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
161 #else
162         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
163 #endif
164
165 /* minimum number of free TX descriptors required to wake up TX process */
166 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
167
168 #define TG3_RAW_IP_ALIGN 2
169
170 /* number of ETHTOOL_GSTATS u64's */
171 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
172
173 #define TG3_NUM_TEST            6
174
175 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
176
177 #define FIRMWARE_TG3            "tigon/tg3.bin"
178 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
179 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
180
181 static char version[] __devinitdata =
182         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
183
184 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
185 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
186 MODULE_LICENSE("GPL");
187 MODULE_VERSION(DRV_MODULE_VERSION);
188 MODULE_FIRMWARE(FIRMWARE_TG3);
189 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
190 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
191
192 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
193 module_param(tg3_debug, int, 0);
194 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
195
196 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
214         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
215         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
269         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
270         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
271         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
272         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
273         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
274         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
275         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
276         {}
277 };
278
279 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
280
281 static const struct {
282         const char string[ETH_GSTRING_LEN];
283 } ethtool_stats_keys[TG3_NUM_STATS] = {
284         { "rx_octets" },
285         { "rx_fragments" },
286         { "rx_ucast_packets" },
287         { "rx_mcast_packets" },
288         { "rx_bcast_packets" },
289         { "rx_fcs_errors" },
290         { "rx_align_errors" },
291         { "rx_xon_pause_rcvd" },
292         { "rx_xoff_pause_rcvd" },
293         { "rx_mac_ctrl_rcvd" },
294         { "rx_xoff_entered" },
295         { "rx_frame_too_long_errors" },
296         { "rx_jabbers" },
297         { "rx_undersize_packets" },
298         { "rx_in_length_errors" },
299         { "rx_out_length_errors" },
300         { "rx_64_or_less_octet_packets" },
301         { "rx_65_to_127_octet_packets" },
302         { "rx_128_to_255_octet_packets" },
303         { "rx_256_to_511_octet_packets" },
304         { "rx_512_to_1023_octet_packets" },
305         { "rx_1024_to_1522_octet_packets" },
306         { "rx_1523_to_2047_octet_packets" },
307         { "rx_2048_to_4095_octet_packets" },
308         { "rx_4096_to_8191_octet_packets" },
309         { "rx_8192_to_9022_octet_packets" },
310
311         { "tx_octets" },
312         { "tx_collisions" },
313
314         { "tx_xon_sent" },
315         { "tx_xoff_sent" },
316         { "tx_flow_control" },
317         { "tx_mac_errors" },
318         { "tx_single_collisions" },
319         { "tx_mult_collisions" },
320         { "tx_deferred" },
321         { "tx_excessive_collisions" },
322         { "tx_late_collisions" },
323         { "tx_collide_2times" },
324         { "tx_collide_3times" },
325         { "tx_collide_4times" },
326         { "tx_collide_5times" },
327         { "tx_collide_6times" },
328         { "tx_collide_7times" },
329         { "tx_collide_8times" },
330         { "tx_collide_9times" },
331         { "tx_collide_10times" },
332         { "tx_collide_11times" },
333         { "tx_collide_12times" },
334         { "tx_collide_13times" },
335         { "tx_collide_14times" },
336         { "tx_collide_15times" },
337         { "tx_ucast_packets" },
338         { "tx_mcast_packets" },
339         { "tx_bcast_packets" },
340         { "tx_carrier_sense_errors" },
341         { "tx_discards" },
342         { "tx_errors" },
343
344         { "dma_writeq_full" },
345         { "dma_write_prioq_full" },
346         { "rxbds_empty" },
347         { "rx_discards" },
348         { "rx_errors" },
349         { "rx_threshold_hit" },
350
351         { "dma_readq_full" },
352         { "dma_read_prioq_full" },
353         { "tx_comp_queue_full" },
354
355         { "ring_set_send_prod_index" },
356         { "ring_status_update" },
357         { "nic_irqs" },
358         { "nic_avoided_irqs" },
359         { "nic_tx_threshold_hit" }
360 };
361
362 static const struct {
363         const char string[ETH_GSTRING_LEN];
364 } ethtool_test_keys[TG3_NUM_TEST] = {
365         { "nvram test     (online) " },
366         { "link test      (online) " },
367         { "register test  (offline)" },
368         { "memory test    (offline)" },
369         { "loopback test  (offline)" },
370         { "interrupt test (offline)" },
371 };
372
/* Write a 32-bit register value via direct memory-mapped I/O. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
377
/* Read a 32-bit register value via direct memory-mapped I/O. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
382
/* Write a 32-bit value to a register in the APE register space. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
387
/* Read a 32-bit value from a register in the APE register space. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
392
/* Write a register indirectly through the PCI config-space window.
 * The base-address write selects the target register and the data
 * write supplies the value; indirect_lock serializes the two-step
 * sequence against other users of the window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
402
/* Write a register via MMIO, then read it back to flush the posted
 * write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
408
/* Read a register indirectly through the PCI config-space window.
 * Counterpart of tg3_write_indirect_reg32(); the same lock protects
 * the select-then-access sequence.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
420
/* Write a mailbox register using indirect (PCI config space) access.
 * Two mailboxes have dedicated config-space aliases and are written
 * directly without taking the lock; everything else goes through the
 * shared register window at offset 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has its own config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index likewise has a dedicated alias. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
450
/* Read a mailbox register using indirect (PCI config space) access
 * through the register window at offset 0x5600.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
462
463 /* usec_wait specifies the wait time in usec when writing to certain registers
464  * where it is unsafe to read back the register without some delay.
465  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
466  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
467  */
/* Write a register and guarantee @usec_wait microseconds elapse before
 * the caller proceeds.  See the comment above for why some registers
 * must not be read back immediately.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		/* Read back to flush the posted write. */
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
487
/* Write a mailbox register and, unless a chipset workaround forbids
 * the read-back, flush the write by reading the mailbox again.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
495
/* Write the TX mailbox, applying two chip-specific workarounds:
 * write twice for the TXD mailbox hardware bug, and read back when
 * mailbox write reordering must be prevented.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
505
/* 5906-specific mailbox read: mailboxes live at GRCMBOX_BASE. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
510
/* 5906-specific mailbox write: mailboxes live at GRCMBOX_BASE. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
515
516 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
517 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
518 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
519 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
520 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
521
522 #define tw32(reg, val)                  tp->write32(tp, reg, val)
523 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
524 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
525 #define tr32(reg)                       tp->read32(tp, reg)
526
/* Write a word into NIC on-board SRAM through the memory window,
 * using either PCI config space or MMIO depending on chip flags.
 * The window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: the statistics block region must not be written. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
551
/* Read a word from NIC on-board SRAM through the memory window.
 * Counterpart of tg3_write_mem(); the window base is restored to
 * zero after the access.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the statistics block region reads back as zero. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
578
579 static void tg3_ape_lock_init(struct tg3 *tp)
580 {
581         int i;
582         u32 regbase;
583
584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
585                 regbase = TG3_APE_LOCK_GRANT;
586         else
587                 regbase = TG3_APE_PER_LOCK_GRANT;
588
589         /* Make sure the driver hasn't any stale locks. */
590         for (i = 0; i < 8; i++)
591                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
592 }
593
/* Acquire an APE hardware lock.
 *
 * Writes a lock request, then polls the grant register for up to
 * ~1 ms.  If the grant never arrives, the request is revoked and
 * -EBUSY is returned.  Returns 0 on success, -EINVAL for an
 * unsupported lock number, or 0 immediately when APE is not enabled.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	/* Only the GRC and MEM locks are supported. */
	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant register blocks. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
641
642 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
643 {
644         u32 gnt;
645
646         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
647                 return;
648
649         switch (locknum) {
650         case TG3_APE_LOCK_GRC:
651         case TG3_APE_LOCK_MEM:
652                 break;
653         default:
654                 return;
655         }
656
657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
658                 gnt = TG3_APE_LOCK_GRANT;
659         else
660                 gnt = TG3_APE_PER_LOCK_GRANT;
661
662         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
663 }
664
/* Mask PCI interrupts at the chip and write 1 into every interrupt
 * mailbox to disable interrupt generation for all vectors.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
674
/* Re-enable interrupts on all vectors: unmask the PCI interrupt,
 * write each vector's last processed tag to its mailbox, and kick
 * the coalescing engine (or force an interrupt if status is already
 * pending in non-tagged mode).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before touching hardware. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI mode requires writing the mailbox twice. */
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
705
706 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
707 {
708         struct tg3 *tp = tnapi->tp;
709         struct tg3_hw_status *sblk = tnapi->hw_status;
710         unsigned int work_exists = 0;
711
712         /* check for phy events */
713         if (!(tp->tg3_flags &
714               (TG3_FLAG_USE_LINKCHG_REG |
715                TG3_FLAG_POLL_SERDES))) {
716                 if (sblk->status & SD_STATUS_LINK_CHG)
717                         work_exists = 1;
718         }
719         /* check for RX/TX work to do */
720         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
721             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
722                 work_exists = 1;
723
724         return work_exists;
725 }
726
727 /* tg3_int_reenable
728  *  similar to tg3_enable_ints, but it accurately determines whether there
729  *  is new work pending and can return without flushing the PIO write
730  *  which reenables interrupts
731  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge work up to last_tag; mmiowb orders the mailbox
	 * write before any subsequent MMIO from another CPU.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
748
/* Switch the chip's core clock to its target frequency, stepping
 * through the ALTCLK intermediate setting where the hardware requires
 * it.  Each write uses a 40 usec settle delay (tw32_wait_f).  No-op on
 * CPMU-equipped or 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the bits we preserve across the switch. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44 MHz + ALTCLK, then ALTCLK only. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
782
783 #define PHY_BUSY_LOOPS  5000
784
/* Read PHY register @reg over the MI (MDIO) interface into @*val.
 * MAC autopolling, if active, is paused for the duration of the
 * transaction and restored afterwards.  Returns 0 on success or
 * -EBUSY if the MI interface never goes idle.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause autopolling while we own the MI interface. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command, start. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for BUSY to clear; re-read once more after it does to
	 * pick up the returned data.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	/* loops == 0 means we timed out without seeing BUSY clear. */
	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
833
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Writes to MII_TG3_CTRL/MII_TG3_AUX_CTRL are silently skipped on
 * FET-type PHYs.  MAC autopolling is paused for the duration of the
 * transaction.  Returns 0 on success, -EBUSY on MI timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* These registers are not applicable on FET PHYs; report success. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Pause autopolling while we own the MI interface. */
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	/* loops == 0 means the MI interface never went idle. */
	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore autopolling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
882
883 static int tg3_bmcr_reset(struct tg3 *tp)
884 {
885         u32 phy_control;
886         int limit, err;
887
888         /* OK, reset it, and poll the BMCR_RESET bit until it
889          * clears or we time out.
890          */
891         phy_control = BMCR_RESET;
892         err = tg3_writephy(tp, MII_BMCR, phy_control);
893         if (err != 0)
894                 return -EBUSY;
895
896         limit = 5000;
897         while (limit--) {
898                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
899                 if (err != 0)
900                         return -EBUSY;
901
902                 if ((phy_control & BMCR_RESET) == 0) {
903                         udelay(40);
904                         break;
905                 }
906                 udelay(10);
907         }
908         if (limit < 0)
909                 return -EBUSY;
910
911         return 0;
912 }
913
914 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
915 {
916         struct tg3 *tp = bp->priv;
917         u32 val;
918
919         spin_lock_bh(&tp->lock);
920
921         if (tg3_readphy(tp, reg, &val))
922                 val = -EIO;
923
924         spin_unlock_bh(&tp->lock);
925
926         return val;
927 }
928
929 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
930 {
931         struct tg3 *tp = bp->priv;
932         u32 ret = 0;
933
934         spin_lock_bh(&tp->lock);
935
936         if (tg3_writephy(tp, reg, val))
937                 ret = -EIO;
938
939         spin_unlock_bh(&tp->lock);
940
941         return ret;
942 }
943
/* mii_bus reset callback (installed in tg3_mdio_init).  Nothing to do
 * at the bus level; per-PHY resets go through tg3_bmcr_reset().
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
948
/* Program the 5785's MAC-side PHY glue (LED modes and RGMII
 * signalling options) to match the attached PHY type.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Select LED modes by PHY model; unknown PHYs are left alone. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: just set LED modes and default timeouts. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band settings into the extended RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1029
/* Disable MAC autopolling of the PHY and, if the mdio bus is already
 * registered on a 5785, refresh the MAC/PHY glue configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);	/* let the MI mode change settle */

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1040
/* Determine the PHY address and, when phylib is in use, allocate and
 * register the mdio bus for this device, then apply per-PHY interface
 * and workaround flags.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		/* On 5717/5719 the PHY address is derived from the PCI
		 * function, with serdes PHYs offset by 7.
		 */
		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		/* 5717 A0 reports serdes via a different strap register. */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Bus setup below only applies with phylib, and only once. */
	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	/* No interrupt-driven PHY state changes; poll every address. */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY interface mode and workaround flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1147
1148 static void tg3_mdio_fini(struct tg3 *tp)
1149 {
1150         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1151                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1152                 mdiobus_unregister(tp->mdio_bus);
1153                 mdiobus_free(tp->mdio_bus);
1154         }
1155 }
1156
1157 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1158 {
1159         int err;
1160
1161         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1162         if (err)
1163                 goto done;
1164
1165         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1166         if (err)
1167                 goto done;
1168
1169         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1170                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1171         if (err)
1172                 goto done;
1173
1174         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1175
1176 done:
1177         return err;
1178 }
1179
1180 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1181 {
1182         int err;
1183
1184         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1185         if (err)
1186                 goto done;
1187
1188         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1189         if (err)
1190                 goto done;
1191
1192         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1193                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1194         if (err)
1195                 goto done;
1196
1197         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1198
1199 done:
1200         return err;
1201 }
1202
/* tp->lock is held. */
/* Raise the driver-event bit to signal the firmware, and timestamp
 * it so tg3_wait_for_event_ack() can bound the wait for the ack.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1214
1215 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1216
/* tp->lock is held. */
/* Wait, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from the last
 * event, for the firmware to clear GRC_RX_CPU_DRIVER_EVENT.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8us slices. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1243
/* tp->lock is held. */
/* Copy the current MII link state (BMCR/BMSR, advertisement/LPA,
 * 1000T control/status, PHY address) into the NIC SRAM firmware
 * command mailbox and raise a driver event.  Only applies to
 * 5780-class chips with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the previous event has been consumed. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* 14 bytes of payload follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the upper half, BMSR in the lower half.
	 * A failed read leaves the corresponding half zero.
	 */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper PHYs only). */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1291
1292 static void tg3_link_report(struct tg3 *tp)
1293 {
1294         if (!netif_carrier_ok(tp->dev)) {
1295                 netif_info(tp, link, tp->dev, "Link is down\n");
1296                 tg3_ump_link_report(tp);
1297         } else if (netif_msg_link(tp)) {
1298                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1299                             (tp->link_config.active_speed == SPEED_1000 ?
1300                              1000 :
1301                              (tp->link_config.active_speed == SPEED_100 ?
1302                               100 : 10)),
1303                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1304                              "full" : "half"));
1305
1306                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1307                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1308                             "on" : "off",
1309                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1310                             "on" : "off");
1311                 tg3_ump_link_report(tp);
1312         }
1313 }
1314
1315 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1316 {
1317         u16 miireg;
1318
1319         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1320                 miireg = ADVERTISE_PAUSE_CAP;
1321         else if (flow_ctrl & FLOW_CTRL_TX)
1322                 miireg = ADVERTISE_PAUSE_ASYM;
1323         else if (flow_ctrl & FLOW_CTRL_RX)
1324                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1325         else
1326                 miireg = 0;
1327
1328         return miireg;
1329 }
1330
1331 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1332 {
1333         u16 miireg;
1334
1335         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1336                 miireg = ADVERTISE_1000XPAUSE;
1337         else if (flow_ctrl & FLOW_CTRL_TX)
1338                 miireg = ADVERTISE_1000XPSE_ASYM;
1339         else if (flow_ctrl & FLOW_CTRL_RX)
1340                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1341         else
1342                 miireg = 0;
1343
1344         return miireg;
1345 }
1346
1347 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1348 {
1349         u8 cap = 0;
1350
1351         if (lcladv & ADVERTISE_1000XPAUSE) {
1352                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1353                         if (rmtadv & LPA_1000XPAUSE)
1354                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1355                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1356                                 cap = FLOW_CTRL_RX;
1357                 } else {
1358                         if (rmtadv & LPA_1000XPAUSE)
1359                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1360                 }
1361         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1362                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1363                         cap = FLOW_CTRL_TX;
1364         }
1365
1366         return cap;
1367 }
1368
/* Resolve the active flow-control setting for the current link from
 * the local (@lcladv) and partner (@rmtadv) pause advertisements, and
 * program MAC_RX_MODE/MAC_TX_MODE to match.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		/* Negotiated pause: resolve from the advertisements. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Touch the hardware only when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1408
/* phylib adjust_link callback: propagate the PHY's current state
 * (speed, duplex, pause) into the MAC configuration registers and
 * emit a link message when anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode by link speed; the 5785
		 * is special-cased for the non-gigabit speeds.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build pause advertisements from
			 * our config and the partner's reported bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 needs a 10Mbps-specific MI status setting. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit uses different slot-time values. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when something user-visible changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1492
/* Connect the MAC to its PHY through phylib and restrict the PHY's
 * supported feature mask to what the MAC can handle.  Idempotent:
 * returns 0 immediately when already connected.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only MACs fall through to the basic set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1540
1541 static void tg3_phy_start(struct tg3 *tp)
1542 {
1543         struct phy_device *phydev;
1544
1545         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1546                 return;
1547
1548         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1549
1550         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1551                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1552                 phydev->speed = tp->link_config.orig_speed;
1553                 phydev->duplex = tp->link_config.orig_duplex;
1554                 phydev->autoneg = tp->link_config.orig_autoneg;
1555                 phydev->advertising = tp->link_config.orig_advertising;
1556         }
1557
1558         phy_start(phydev);
1559
1560         phy_start_aneg(phydev);
1561 }
1562
1563 static void tg3_phy_stop(struct tg3 *tp)
1564 {
1565         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1566                 return;
1567
1568         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1569 }
1570
1571 static void tg3_phy_fini(struct tg3 *tp)
1572 {
1573         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1574                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1575                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1576         }
1577 }
1578
1579 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1580 {
1581         int err;
1582
1583         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1584         if (!err)
1585                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1586
1587         return err;
1588 }
1589
1590 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1591 {
1592         int err;
1593
1594         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1595         if (!err)
1596                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1597
1598         return err;
1599 }
1600
/* Enable/disable Auto Power-Down on FET-type PHYs via the shadow
 * register set hidden behind MII_TG3_FET_TEST.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Expose the shadow registers. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the original test register, hiding the shadows. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
1620
/* Enable or disable the PHY's Auto Power-Down (APD) feature.  Only
 * applies to 5705+ chips; skipped for MII serdes on 5717/5719.  FET
 * PHYs are handled by tg3_phy_fet_toggle_apd() instead.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program the SCR5 shadow register; the DLL APD bit is set
	 * except when enabling APD on a 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD shadow register with an 84ms wake timer. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1656
/* Enable/disable automatic MDI crossover (auto-MDIX) on the copper PHY.
 * No-op on pre-5705 chips and on any serdes interface.  FET PHYs are
 * programmed through the MISCCTRL shadow register; other PHYs through
 * the MISC auxctl shadow with WREN set on write-back.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* Open shadow access, flip the MDIX bit in MISCCTRL,
		 * then restore the original FET_TEST value to close
		 * shadow access again.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		/* Select the MISC auxctl shadow for readback, then
		 * read-modify-write the force-auto-MDIX bit.  WREN must
		 * be set for the write to take effect.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1696
1697 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1698 {
1699         u32 val;
1700
1701         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1702                 return;
1703
1704         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1705             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1706                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1707                              (val | (1 << 15) | (1 << 4)));
1708 }
1709
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * word cached in tp->phy_otp.  Each OTP field is masked/shifted into
 * place and written through the DSP port, with the SM_DSP clock
 * enabled for the duration of the sequence.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	/* Nothing to do if no OTP value was captured at probe time. */
	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target, merged with the default TAP1 bits. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings for channel 0. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust, channel 3. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1752
/* Update Energy Efficient Ethernet state after a link change.  When
 * autoneg produced a full-duplex 100/1000 link, program the CPMU LPI
 * exit timer and, if the link partner resolved an EEE mode, arm
 * tp->setlpicnt (the countdown the timer code uses before enabling
 * LPI).  If LPI is not going to be used, clear its enable bit.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on the negotiated speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the EEE resolution status (clause 45 register). */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				/* Enable SM_DSP clock and tx 6dB coding. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);

				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);

				/* Turn off SM_DSP clock. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			/* Either resolution arms the LPI countdown. */
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		/* LPI will not be used on this link; disable it. */
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1809
1810 static int tg3_wait_macro_done(struct tg3 *tp)
1811 {
1812         int limit = 100;
1813
1814         while (limit--) {
1815                 u32 tmp32;
1816
1817                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1818                         if ((tmp32 & 0x1000) == 0)
1819                                 break;
1820                 }
1821         }
1822         if (limit < 0)
1823                 return -EBUSY;
1824
1825         return 0;
1826 }
1827
/* Write a known test pattern into each of the four DSP channels, read
 * it back and compare.  On any "macro done" timeout, *resetp is set so
 * the caller resets the PHY before retrying.  Returns 0 when every
 * channel verifies, -EBUSY on timeout or data mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Six tap values (low word, high word pairs) per channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only 15 bits of the low word and 4 bits of the
			 * high word are significant for the compare.
			 */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke recovery values before
				 * reporting failure (no PHY reset needed).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1893
1894 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1895 {
1896         int chan;
1897
1898         for (chan = 0; chan < 4; chan++) {
1899                 int i;
1900
1901                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902                              (chan * 0x2000) | 0x0200);
1903                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1904                 for (i = 0; i < 6; i++)
1905                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1906                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1907                 if (tg3_wait_macro_done(tp))
1908                         return -EBUSY;
1909         }
1910
1911         return 0;
1912 }
1913
1914 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1915 {
1916         u32 reg32, phy9_orig;
1917         int retries, do_phy_reset, err;
1918
1919         retries = 10;
1920         do_phy_reset = 1;
1921         do {
1922                 if (do_phy_reset) {
1923                         err = tg3_bmcr_reset(tp);
1924                         if (err)
1925                                 return err;
1926                         do_phy_reset = 0;
1927                 }
1928
1929                 /* Disable transmitter and interrupt.  */
1930                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1931                         continue;
1932
1933                 reg32 |= 0x3000;
1934                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1935
1936                 /* Set full-duplex, 1000 mbps.  */
1937                 tg3_writephy(tp, MII_BMCR,
1938                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1939
1940                 /* Set to master mode.  */
1941                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1942                         continue;
1943
1944                 tg3_writephy(tp, MII_TG3_CTRL,
1945                              (MII_TG3_CTRL_AS_MASTER |
1946                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1947
1948                 /* Enable SM_DSP_CLOCK and 6dB.  */
1949                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1950
1951                 /* Block the PHY control access.  */
1952                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1953
1954                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1955                 if (!err)
1956                         break;
1957         } while (--retries);
1958
1959         err = tg3_phy_reset_chanpat(tp);
1960         if (err)
1961                 return err;
1962
1963         tg3_phydsp_write(tp, 0x8005, 0x0000);
1964
1965         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1966         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1967
1968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1969             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1970                 /* Set Extended packet length bit for jumbo frames */
1971                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1972         } else {
1973                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1974         }
1975
1976         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1977
1978         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1979                 reg32 &= ~0x3000;
1980                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1981         } else if (!err)
1982                 err = -EBUSY;
1983
1984         return err;
1985 }
1986
/* Reset the tigon3 PHY and re-apply all chip-specific workarounds.
 * Reports loss of carrier if the link was up, runs the ASIC-specific
 * reset sequence, then reprograms DSP/auxctl registers according to
 * the PHY bug flags.  Returns 0 on success or a negative errno.
 * (The old comment's "FORCE argument" no longer exists.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the EPHY out of IDDQ (low-power) mode first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: link status is a latched-low bit. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern workaround. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily clear 10MB RX-only mode around the reset. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz GPHY MAC clock selection if set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining workarounds do not apply to 5717/5719 MII
	 * serdes interfaces.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
		/* ADC bug workaround: DSP writes bracketed by auxctl. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* 5704 A0 workaround: the shadow write is issued twice. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}
	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		/* Bit-error-rate bug workaround via DSP writes. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x000a, 0x310b);
		tg3_phydsp_write(tp, 0x201f, 0x9506);
		tg3_phydsp_write(tp, 0x401f, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2120
/* Program the GRC local-control GPIOs that select the auxiliary (Vaux)
 * power source.  Vaux is kept on when this device or its dual-port
 * peer needs power while the main supply goes away (WOL or ASF
 * enabled); otherwise the GPIOs are driven to switch back to Vmain.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* On dual-port chips, also honor the peer port's power needs. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* If the peer is fully initialized, leave the
			 * shared GPIOs alone.
			 */
			if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
				return;

			if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
			    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Vaux not needed: pulse GPIO1 to switch power sources,
		 * except on 5700/5701 where the GPIOs are left alone.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2237
2238 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2239 {
2240         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2241                 return 1;
2242         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2243                 if (speed != SPEED_10)
2244                         return 1;
2245         } else if (speed == SPEED_10)
2246                 return 1;
2247
2248         return 0;
2249 }
2250
2251 static int tg3_setup_phy(struct tg3 *, int);
2252
2253 #define RESET_KIND_SHUTDOWN     0
2254 #define RESET_KIND_INIT         1
2255 #define RESET_KIND_SUSPEND      2
2256
2257 static void tg3_write_sig_post_reset(struct tg3 *, int);
2258 static int tg3_halt_cpu(struct tg3 *, u32);
2259
/* Put the PHY into its lowest safe power state prior to chip power
 * down.  Serdes, 5906 and FET PHYs each use their own sequence; other
 * PHYs optionally get low-power auxctl programming (@do_low_power) and
 * finally BMCR power-down — except on chips where powering the PHY
 * down is known to be buggy.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the serdes in soft reset with hardware
			 * autoneg selected.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then drop the EPHY into IDDQ
		 * (low-power) mode.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Stop advertising and restart autoneg so the
			 * link partner sees the link drop.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set standby power-down via the AUXMODE4
			 * shadow register, restoring FET_TEST after.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force LEDs off and program the low-power/isolate
		 * auxctl power-control shadow.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Select the 12.5MHz GPHY MAC clock before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2333
2334 /* tp->lock is held. */
2335 static int tg3_nvram_lock(struct tg3 *tp)
2336 {
2337         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2338                 int i;
2339
2340                 if (tp->nvram_lock_cnt == 0) {
2341                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2342                         for (i = 0; i < 8000; i++) {
2343                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2344                                         break;
2345                                 udelay(20);
2346                         }
2347                         if (i == 8000) {
2348                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2349                                 return -ENODEV;
2350                         }
2351                 }
2352                 tp->nvram_lock_cnt++;
2353         }
2354         return 0;
2355 }
2356
2357 /* tp->lock is held. */
2358 static void tg3_nvram_unlock(struct tg3 *tp)
2359 {
2360         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2361                 if (tp->nvram_lock_cnt > 0)
2362                         tp->nvram_lock_cnt--;
2363                 if (tp->nvram_lock_cnt == 0)
2364                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2365         }
2366 }
2367
2368 /* tp->lock is held. */
2369 static void tg3_enable_nvram_access(struct tg3 *tp)
2370 {
2371         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2372             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2373                 u32 nvaccess = tr32(NVRAM_ACCESS);
2374
2375                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2376         }
2377 }
2378
2379 /* tp->lock is held. */
2380 static void tg3_disable_nvram_access(struct tg3 *tp)
2381 {
2382         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2383             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2384                 u32 nvaccess = tr32(NVRAM_ACCESS);
2385
2386                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2387         }
2388 }
2389
2390 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2391                                         u32 offset, u32 *val)
2392 {
2393         u32 tmp;
2394         int i;
2395
2396         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2397                 return -EINVAL;
2398
2399         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2400                                         EEPROM_ADDR_DEVID_MASK |
2401                                         EEPROM_ADDR_READ);
2402         tw32(GRC_EEPROM_ADDR,
2403              tmp |
2404              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2405              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2406               EEPROM_ADDR_ADDR_MASK) |
2407              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2408
2409         for (i = 0; i < 1000; i++) {
2410                 tmp = tr32(GRC_EEPROM_ADDR);
2411
2412                 if (tmp & EEPROM_ADDR_COMPLETE)
2413                         break;
2414                 msleep(1);
2415         }
2416         if (!(tmp & EEPROM_ADDR_COMPLETE))
2417                 return -EBUSY;
2418
2419         tmp = tr32(GRC_EEPROM_DATA);
2420
2421         /*
2422          * The data will always be opposite the native endian
2423          * format.  Perform a blind byteswap to compensate.
2424          */
2425         *val = swab32(tmp);
2426
2427         return 0;
2428 }
2429
2430 #define NVRAM_CMD_TIMEOUT 10000
2431
2432 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2433 {
2434         int i;
2435
2436         tw32(NVRAM_CMD, nvram_cmd);
2437         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2438                 udelay(10);
2439                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2440                         udelay(10);
2441                         break;
2442                 }
2443         }
2444
2445         if (i == NVRAM_CMD_TIMEOUT)
2446                 return -EBUSY;
2447
2448         return 0;
2449 }
2450
2451 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2452 {
2453         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2454             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2455             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2456            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2457             (tp->nvram_jedecnum == JEDEC_ATMEL))
2458
2459                 addr = ((addr / tp->nvram_pagesize) <<
2460                         ATMEL_AT45DB0X1B_PAGE_POS) +
2461                        (addr % tp->nvram_pagesize);
2462
2463         return addr;
2464 }
2465
2466 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2467 {
2468         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2469             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2470             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2471            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2472             (tp->nvram_jedecnum == JEDEC_ATMEL))
2473
2474                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2475                         tp->nvram_pagesize) +
2476                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2477
2478         return addr;
2479 }
2480
2481 /* NOTE: Data read in from NVRAM is byteswapped according to
2482  * the byteswapping settings for all other register accesses.
2483  * tg3 devices are BE devices, so on a BE machine, the data
2484  * returned will be exactly as it is seen in NVRAM.  On a LE
2485  * machine, the 32-bit value will be byteswapped.
2486  */
2487 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2488 {
2489         int ret;
2490
2491         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2492                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2493
2494         offset = tg3_nvram_phys_addr(tp, offset);
2495
2496         if (offset > NVRAM_ADDR_MSK)
2497                 return -EINVAL;
2498
2499         ret = tg3_nvram_lock(tp);
2500         if (ret)
2501                 return ret;
2502
2503         tg3_enable_nvram_access(tp);
2504
2505         tw32(NVRAM_ADDR, offset);
2506         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2507                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2508
2509         if (ret == 0)
2510                 *val = tr32(NVRAM_RDDATA);
2511
2512         tg3_disable_nvram_access(tp);
2513
2514         tg3_nvram_unlock(tp);
2515
2516         return ret;
2517 }
2518
2519 /* Ensures NVRAM data is in bytestream format. */
2520 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2521 {
2522         u32 v;
2523         int res = tg3_nvram_read(tp, offset, &v);
2524         if (!res)
2525                 *val = cpu_to_be32(v);
2526         return res;
2527 }
2528
2529 /* tp->lock is held. */
2530 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2531 {
2532         u32 addr_high, addr_low;
2533         int i;
2534
2535         addr_high = ((tp->dev->dev_addr[0] << 8) |
2536                      tp->dev->dev_addr[1]);
2537         addr_low = ((tp->dev->dev_addr[2] << 24) |
2538                     (tp->dev->dev_addr[3] << 16) |
2539                     (tp->dev->dev_addr[4] <<  8) |
2540                     (tp->dev->dev_addr[5] <<  0));
2541         for (i = 0; i < 4; i++) {
2542                 if (i == 1 && skip_mac_1)
2543                         continue;
2544                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2545                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2546         }
2547
2548         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2550                 for (i = 0; i < 12; i++) {
2551                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2552                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2553                 }
2554         }
2555
2556         addr_high = (tp->dev->dev_addr[0] +
2557                      tp->dev->dev_addr[1] +
2558                      tp->dev->dev_addr[2] +
2559                      tp->dev->dev_addr[3] +
2560                      tp->dev->dev_addr[4] +
2561                      tp->dev->dev_addr[5]) &
2562                 TX_BACKOFF_SEED_MASK;
2563         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2564 }
2565
/* Rewrite TG3PCI_MISC_HOST_CTRL from the cached value so register
 * accesses work again after PCI config space may have been reset by
 * a power state change.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2575
/* Restore the device to the D0 (full power) PCI state and switch a
 * NIC-style board off auxiliary (Vaux) power.  Always returns 0.
 */
static int tg3_power_up(struct tg3 *tp)
{
	/* Re-enable register access before touching chip registers. */
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2588
/* Prepare the chip for a transition to a low power state.
 *
 * Saves the live link configuration, reprograms the PHY for low
 * power / WOL-speed operation, arms magic-packet reception when
 * wake-up is requested, gates RX/TX/core clocks on generations
 * that allow it, and finally posts the shutdown signature to the
 * firmware mailbox.  The exact register ordering below is
 * chip-revision dependent; do not reorder.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: save the current link parameters
		 * and restrict the advertisement to what WOL needs.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save link parameters for restore on power up. */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families also need the
			 * MAC-driven low power sequence below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Driver-managed PHY: drop the link to 10/half before
		 * powering down (serdes links are left as-is).
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for firmware to signal readiness
		 * via the ASF status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		/* Keep the MAC receive path alive so magic packets
		 * can be recognized while suspended.
		 */
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks where the hardware generation allows it. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch; each write must settle. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down if nothing needs it awake. */
	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2835
2836 static void tg3_power_down(struct tg3 *tp)
2837 {
2838         tg3_power_down_prepare(tp);
2839
2840         pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2841         pci_set_power_state(tp->pdev, PCI_D3hot);
2842 }
2843
2844 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2845 {
2846         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2847         case MII_TG3_AUX_STAT_10HALF:
2848                 *speed = SPEED_10;
2849                 *duplex = DUPLEX_HALF;
2850                 break;
2851
2852         case MII_TG3_AUX_STAT_10FULL:
2853                 *speed = SPEED_10;
2854                 *duplex = DUPLEX_FULL;
2855                 break;
2856
2857         case MII_TG3_AUX_STAT_100HALF:
2858                 *speed = SPEED_100;
2859                 *duplex = DUPLEX_HALF;
2860                 break;
2861
2862         case MII_TG3_AUX_STAT_100FULL:
2863                 *speed = SPEED_100;
2864                 *duplex = DUPLEX_FULL;
2865                 break;
2866
2867         case MII_TG3_AUX_STAT_1000HALF:
2868                 *speed = SPEED_1000;
2869                 *duplex = DUPLEX_HALF;
2870                 break;
2871
2872         case MII_TG3_AUX_STAT_1000FULL:
2873                 *speed = SPEED_1000;
2874                 *duplex = DUPLEX_FULL;
2875                 break;
2876
2877         default:
2878                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2879                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2880                                  SPEED_10;
2881                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2882                                   DUPLEX_HALF;
2883                         break;
2884                 }
2885                 *speed = SPEED_INVALID;
2886                 *duplex = DUPLEX_INVALID;
2887                 break;
2888         }
2889 }
2890
/* Program the copper PHY advertisement and BMCR registers according
 * to tp->link_config.  Three cases: entering low power mode (only
 * 10Mb, optionally 100Mb for WOL, is advertised), autonegotiation of
 * all configured modes, and forcing a specific speed/duplex.  Also
 * configures EEE advertisement on EEE-capable chips.  tp->lock held.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise every configured mode. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 erratum: force master mode for 1000T. */
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode erratum as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement when forcing 10/100. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		u32 val;

		tw32(TG3_CPMU_EEE_MODE,
		     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

		/* Enable SM_DSP clock and tx 6dB coding. */
		val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
		      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
		      MII_TG3_AUXCTL_ACTL_TX_6DB;
		tg3_writephy(tp, MII_TG3_AUX_CTRL, val);

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Advertise 100-BaseTX EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_100baseT_Full)
				val |= MDIO_AN_EEE_ADV_100TX;
			/* Advertise 1000-BaseT EEE ability */
			if (tp->link_config.advertising &
			    ADVERTISED_1000baseT_Full)
				val |= MDIO_AN_EEE_ADV_1000T;
		}
		tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		/* Turn off SM_DSP clock. */
		val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
		      MII_TG3_AUXCTL_ACTL_TX_6DB;
		tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback before
			 * reprogramming BMCR, then wait for BMSR to
			 * reflect link-down (double read is required
			 * because BMSR link status is latched).
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3073
3074 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3075 {
3076         int err;
3077
3078         /* Turn off tap power management. */
3079         /* Set Extended packet length bit */
3080         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
3081
3082         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3083         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3084         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3085         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3086         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3087
3088         udelay(40);
3089
3090         return err;
3091 }
3092
3093 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3094 {
3095         u32 adv_reg, all_mask = 0;
3096
3097         if (mask & ADVERTISED_10baseT_Half)
3098                 all_mask |= ADVERTISE_10HALF;
3099         if (mask & ADVERTISED_10baseT_Full)
3100                 all_mask |= ADVERTISE_10FULL;
3101         if (mask & ADVERTISED_100baseT_Half)
3102                 all_mask |= ADVERTISE_100HALF;
3103         if (mask & ADVERTISED_100baseT_Full)
3104                 all_mask |= ADVERTISE_100FULL;
3105
3106         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3107                 return 0;
3108
3109         if ((adv_reg & all_mask) != all_mask)
3110                 return 0;
3111         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3112                 u32 tg3_ctrl;
3113
3114                 all_mask = 0;
3115                 if (mask & ADVERTISED_1000baseT_Half)
3116                         all_mask |= ADVERTISE_1000HALF;
3117                 if (mask & ADVERTISED_1000baseT_Full)
3118                         all_mask |= ADVERTISE_1000FULL;
3119
3120                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3121                         return 0;
3122
3123                 if ((tg3_ctrl & all_mask) != all_mask)
3124                         return 0;
3125         }
3126         return 1;
3127 }
3128
3129 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3130 {
3131         u32 curadv, reqadv;
3132
3133         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3134                 return 1;
3135
3136         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3137         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3138
3139         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3140                 if (curadv != reqadv)
3141                         return 0;
3142
3143                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3144                         tg3_readphy(tp, MII_LPA, rmtadv);
3145         } else {
3146                 /* Reprogram the advertisement register, even if it
3147                  * does not affect the current link.  If the link
3148                  * gets renegotiated in the future, we can save an
3149                  * additional renegotiation cycle by advertising
3150                  * it correctly in the first place.
3151                  */
3152                 if (curadv != reqadv) {
3153                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3154                                      ADVERTISE_PAUSE_ASYM);
3155                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3156                 }
3157         }
3158
3159         return 1;
3160 }
3161
/* Bring up, verify, or renegotiate the link on a copper PHY and
 * program the MAC (port mode, duplex, polarity, events) to match.
 *
 * @tp:          device state
 * @force_reset: nonzero to unconditionally reset the PHY first
 *
 * Called from the link-management paths; always returns 0 except
 * when a 5401 PHY DSP init or reset fails, in which case the
 * tg3_writephy() error code is propagated.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and clear any latched link attentions. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MDIO auto-polling while we drive the bus directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* NOTE(review): 0x02 selects an AUX_CTRL shadow/config value;
	 * exact semantics come from the Broadcom PHY datasheet — not
	 * derivable from this file alone.
	 */
	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Double read: flush the latched BMSR value. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Reload the 5401 DSP coefficients and wait up
			 * to ~10ms for the link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full reset
			 * plus DSP reload if the link did not return.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt when using MI
	 * interrupts; otherwise mask everything (non-FET PHYs).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* Select the LED mode on 5700/5701 per the configured policy. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Capacitively-coupled links: make sure bit 10 of AUX_CTRL
	 * (shadow 0x4007) is set; if we had to set it, skip straight
	 * to renegotiation.
	 */
	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link, again using double reads to
	 * clear the latched BMSR value.
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the AUX status register to report a valid
		 * (nonzero) speed/duplex resolution.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a plausible value
		 * (nonzero and not the all-ones-minus-reset 0x7fff
		 * pattern seen on a stuck MDIO read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts as up only if autoneg is on and
			 * everything we want is being advertised.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: the resolved link must match the
			 * forced speed/duplex/flow-control settings.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* No usable link (or low-power recovery): kick off a fresh
	 * negotiation and re-check link state once more.
	 */
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700 needs the link-polarity bit adjusted per speed. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: re-clear the
	 * latched status and hand the firmware mailbox a magic value.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		/* Disable PCIe CLKREQ at 10/100, enable it otherwise;
		 * only write the config word when the value changes.
		 */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
3439
/* State for the software 1000BASE-X autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine() from fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the state machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control/status bits, MR_LP_ADV_* mirror
			 * the link partner's received abilities */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick timestamps (in state-machine invocations, not jiffies). */
	unsigned long link_time, cur_time;

	/* Last raw config word received, and how many consecutive
	 * times the same word has been seen (ability match needs >1).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match flags derived from the received config word. */
	char ability_match, idle_match, ack_match;

	/* Config word we transmit / last config word we received. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
3503
/* Run one tick of the software 1000BASE-X autonegotiation state
 * machine (used when hardware autoneg is not in play).
 *
 * @tp: device state
 * @ap: persistent autoneg state, carried between invocations
 *
 * Samples the received config word from the MAC, updates the
 * ability/ack/idle match tracking, then advances @ap->state.
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep going, timer
 * armed), ANEG_DONE (negotiation finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and track how stable it is:
	 * the same nonzero word must be seen more than once before it
	 * counts as an "ability match".
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word: the partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Reset all tracking and begin a fresh round. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's ability with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* The acked word must match what we matched on
			 * (ignoring the ACK bit itself); otherwise restart.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only proceed when neither side uses it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop transmitting config words; wait for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3755
3756 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3757 {
3758         int res = 0;
3759         struct tg3_fiber_aneginfo aninfo;
3760         int status = ANEG_FAILED;
3761         unsigned int tick;
3762         u32 tmp;
3763
3764         tw32_f(MAC_TX_AUTO_NEG, 0);
3765
3766         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3767         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3768         udelay(40);
3769
3770         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3771         udelay(40);
3772
3773         memset(&aninfo, 0, sizeof(aninfo));
3774         aninfo.flags |= MR_AN_ENABLE;
3775         aninfo.state = ANEG_STATE_UNKNOWN;
3776         aninfo.cur_time = 0;
3777         tick = 0;
3778         while (++tick < 195000) {
3779                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3780                 if (status == ANEG_DONE || status == ANEG_FAILED)
3781                         break;
3782
3783                 udelay(1);
3784         }
3785
3786         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3787         tw32_f(MAC_MODE, tp->mac_mode);
3788         udelay(40);
3789
3790         *txflags = aninfo.txconfig;
3791         *rxflags = aninfo.flags;
3792
3793         if (status == ANEG_DONE &&
3794             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3795                              MR_LP_ADV_FULL_DUPLEX)))
3796                 res = 1;
3797
3798         return res;
3799 }
3800
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 * The register numbers/values are vendor-specific; the sequence is
 * taken as-is and its exact order and delays must be preserved.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3850
/* Bring up the fibre SerDes link via the SG_DIG hardware autoneg
 * state machine.  @mac_status is a recent snapshot of MAC_STATUS.
 * Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* All revisions except 5704 A0/A1 get the MAC_SERDES_CFG
         * workaround applied around SG_DIG reconfiguration.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: tear down hardware autoneg if it was on. */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Per-port serdes config workaround value. */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* In parallel-detect mode with PCS sync and no config
                 * words, keep the link up while the countdown runs
                 * instead of restarting autoneg.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                /* Pulse SOFT_RESET while writing the new autoneg setup. */
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Translate SG_DIG pause bits into the MII-style
                         * advertisement words flow-control setup expects.
                         */
                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: fall back to common
                                 * setup and attempt parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* Neither sync nor signal detect: rearm the autoneg
                 * timeout and clear parallel-detect state.
                 */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3992
/* Bring up the fibre link with software autoneg (fiber_autoneg()), or
 * force 1000FD when autoneg is disabled.  @mac_status is a recent
 * MAC_STATUS snapshot.  Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* Without PCS sync there is nothing to negotiate with. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Map negotiated pause bits onto the MII-style
                         * advertisement words flow-control setup expects.
                         */
                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop arriving
                 * (bounded at 30 iterations).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                mac_status = tr32(MAC_STATUS);
                /* Autoneg failed, but we still have sync and see no
                 * config words: accept the link (parallel detection).
                 */
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                /* Pulse SEND_CONFIGS, then restore normal MAC mode. */
                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
4054
/* (Re)establish the link on a fibre (TBI) interface.  Chooses between
 * hardware and software autoneg, then updates carrier state, LEDs and
 * the cached speed/duplex/flow-control settings.  Always returns 0;
 * @force_reset is unused in this variant.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Remember prior settings so we only report a link change when
         * something actually changed.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Fast path: software autoneg, carrier already up, and the MAC
         * shows a clean synced link - just ack the change bits and keep
         * the current configuration.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Switch the MAC into TBI port mode. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the stale link-change bit in the status block. */
        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack pending sync/config events (bounded retry loop). */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Autoneg countdown expired without sync: nudge the
                 * partner by briefly pulsing SEND_CONFIGS.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        if (current_link_up == 1) {
                /* Fibre links here are always recorded as 1000/full. */
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_INVALID;
                tp->link_config.active_duplex = DUPLEX_INVALID;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        } else {
                /* Carrier unchanged - still report if flow control,
                 * speed or duplex were renegotiated.
                 */
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
4162
/* Set up the link on a fibre interface driven through an MII-style
 * SerDes PHY (5714-class hardware).  Handles autoneg, forced mode and
 * parallel detection; updates carrier state and the cached
 * speed/duplex.  Returns the OR-accumulated tg3_readphy() error code.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up, err = 0;
        u32 bmsr, bmcr;
        u16 current_speed;
        u8 current_duplex;
        u32 local_adv, remote_adv;

        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32(MAC_EVENT, 0);

        /* Ack any stale link/MI events before reprogramming. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if (force_reset)
                tg3_phy_reset(tp);

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        /* BMSR link status is latched-low; read twice so we see the
         * current state rather than a stale latched value.
         */
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                /* On 5714, take link up/down from the MAC TX status. */
                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        err |= tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
            (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 adv, new_adv;

                /* Rebuild the 1000X advertisement word from the
                 * requested settings, preserving unrelated bits.
                 */
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
                                  ADVERTISE_1000XPAUSE |
                                  ADVERTISE_1000XPSE_ASYM |
                                  ADVERTISE_SLCT);

                new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

                if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                        new_adv |= ADVERTISE_1000XHALF;
                if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                        new_adv |= ADVERTISE_1000XFULL;

                if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
                        /* Advertisement changed (or autoneg was off):
                         * restart autoneg and return; the result is
                         * picked up on a later invocation.
                         */
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);

                        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
                        tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                        return err;
                }
        } else {
                u32 new_bmcr;

                /* Forced mode: build the desired BMCR value. */
                bmcr &= ~BMCR_SPEED1000;
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

                if (tp->link_config.duplex == DUPLEX_FULL)
                        new_bmcr |= BMCR_FULLDPLX;

                if (new_bmcr != bmcr) {
                        /* BMCR_SPEED1000 is a reserved bit that needs
                         * to be set on write.
                         */
                        new_bmcr |= BMCR_SPEED1000;

                        /* Force a linkdown */
                        if (netif_carrier_ok(tp->dev)) {
                                u32 adv;

                                /* Withdraw the 1000X advertisement and
                                 * restart autoneg to drop the link.
                                 */
                                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
                                adv &= ~(ADVERTISE_1000XFULL |
                                         ADVERTISE_1000XHALF |
                                         ADVERTISE_SLCT);
                                tg3_writephy(tp, MII_ADVERTISE, adv);
                                tg3_writephy(tp, MII_BMCR, bmcr |
                                                           BMCR_ANRESTART |
                                                           BMCR_ANENABLE);
                                udelay(10);
                                netif_carrier_off(tp->dev);
                        }
                        tg3_writephy(tp, MII_BMCR, new_bmcr);
                        bmcr = new_bmcr;
                        /* Latched-low BMSR: read twice for fresh state. */
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                        bmsr |= BMSR_LSTATUS;
                                else
                                        bmsr &= ~BMSR_LSTATUS;
                        }
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
        }

        if (bmsr & BMSR_LSTATUS) {
                current_speed = SPEED_1000;
                current_link_up = 1;
                if (bmcr & BMCR_FULLDPLX)
                        current_duplex = DUPLEX_FULL;
                else
                        current_duplex = DUPLEX_HALF;

                local_adv = 0;
                remote_adv = 0;

                if (bmcr & BMCR_ANENABLE) {
                        u32 common;

                        /* Resolve duplex from the intersection of our and
                         * the partner's advertisements.
                         */
                        err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
                        err |= tg3_readphy(tp, MII_LPA, &remote_adv);
                        common = local_adv & remote_adv;
                        if (common & (ADVERTISE_1000XHALF |
                                      ADVERTISE_1000XFULL)) {
                                if (common & ADVERTISE_1000XFULL)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
                        } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
                                current_link_up = 0;
                        }
                }
        }

        /* local_adv/remote_adv are zeroed above whenever
         * current_link_up was set, so this call never sees garbage.
         */
        if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
                tg3_setup_flow_control(tp, local_adv, remote_adv);

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

        tp->link_config.active_speed = current_speed;
        tp->link_config.active_duplex = current_duplex;

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else {
                        netif_carrier_off(tp->dev);
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                }
                tg3_link_report(tp);
        }
        return err;
}
4334
/* SerDes parallel-detection handling, run once the autoneg countdown
 * (tp->serdes_counter) has expired.  With no carrier: if autoneg is
 * enabled but we see signal detect and no config code words, force the
 * link up by parallel detection.  With carrier via parallel detect: if
 * config code words start arriving, switch back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
        if (tp->serdes_counter) {
                /* Give autoneg time to complete. */
                tp->serdes_counter--;
                return;
        }

        if (!netif_carrier_ok(tp->dev) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 bmcr;

                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* Select shadow register 0x1f */
                        tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
                        tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

                        /* Select expansion interrupt status register */
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                         MII_TG3_DSP_EXP1_INT_STAT);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                        tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

                        /* phy1 bit 0x10: signal detect; phy2 bit 0x20:
                         * receiving config code words.
                         */
                        if ((phy1 & 0x10) && !(phy2 & 0x20)) {
                                /* We have signal detect and not receiving
                                 * config code words, link is up by parallel
                                 * detection.
                                 */

                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                tg3_writephy(tp, MII_BMCR, bmcr);
                                tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
                        }
                }
        } else if (netif_carrier_ok(tp->dev) &&
                   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
                   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Select expansion interrupt status register */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                                 MII_TG3_DSP_EXP1_INT_STAT);
                tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        /* Config code words received, turn on autoneg. */
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

                }
        }
}
4394
/* Top-level link setup: dispatch to the fibre, MII-serdes or copper
 * variant based on phy_flags, then adjust the GRC prescaler (5784 AX),
 * TX slot time, statistics coalescing and the ASPM power threshold to
 * match the resulting link state.  Returns the variant's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
        int err;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                err = tg3_setup_fiber_phy(tp, force_reset);
        else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        else
                err = tg3_setup_copper_phy(tp, force_reset);

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
                u32 val, scale;

                /* Derive the GRC timer prescaler from the current MAC
                 * core clock rate.
                 */
                val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
                if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
                        scale = 65;
                else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
                        scale = 6;
                else
                        scale = 12;

                val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
                val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
                tw32(GRC_MISC_CFG, val);
        }

        /* 1000/half uses a longer slot time (0xff) than all other
         * speed/duplex combinations (32).
         */
        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                /* Only coalesce statistics while the link is up. */
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
                u32 val = tr32(PCIE_PWR_MGMT_THRESH);
                /* Select the ASPM L1 threshold based on link state:
                 * the saved pwrmgmt_thresh when down, full mask when up.
                 */
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}
4455
/* Report whether interrupt synchronization is in progress
 * (tp->irq_sync is set/cleared elsewhere in the driver).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
4460
4461 /* This is called whenever we suspect that the system chipset is re-
4462  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4463  * is bogus tx completions. We try to recover by setting the
4464  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4465  * in the workqueue.
4466  */
/* Recover from a suspected chipset MMIO write reordering (detected as
 * inconsistent tx ring state in tg3_tx()); see the comment above for
 * background.  Warns and flags TX_RECOVERY_PENDING for the reset task.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* Must not already be using reorder-safe (indirect) mailbox
         * writes; otherwise this recovery cannot help.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        netdev_warn(tp->dev,
                    "The system may be re-ordering memory-mapped I/O "
                    "cycles to the network device, attempting to recover. "
                    "Please report the problem to the driver maintainer "
                    "and include system chipset information.\n");

        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
4482
4483 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4484 {
4485         /* Tell compiler to fetch tx indices from memory. */
4486         barrier();
4487         return tnapi->tx_pending -
4488                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4489 }
4490
4491 /* Tigon3 never reports partial packet sends.  So we do not
4492  * need special logic to handle SKBs that have not had all
4493  * of their frags sent yet, like SunGEM does.
4494  */
/* Reclaim completed tx descriptors for @tnapi: unmap each skb's head
 * and fragments, free the skb, advance the consumer index, and wake
 * the tx queue if it was stopped and enough space opened up.
 * Inconsistent ring state is treated as chipset write-reordering and
 * handed to tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;

        /* With TSS enabled the tx queue numbering is offset by one
         * relative to the napi context index.
         */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                index--;

        txq = netdev_get_tx_queue(tp->dev, index);

        while (sw_idx != hw_idx) {
                struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot without an skb means hardware and
                 * software disagree about the ring state.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                pci_unmap_single(tp->pdev,
                                 dma_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* The skb's fragments occupy the following slots; only
                 * the head slot carries the skb pointer.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tnapi->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       dma_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);
                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tnapi->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under the tx lock to avoid racing a concurrent
         * queue stop in the transmit path.
         */
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}
4565
4566 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4567 {
4568         if (!ri->skb)
4569                 return;
4570
4571         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4572                          map_sz, PCI_DMA_FROMDEVICE);
4573         dev_kfree_skb_any(ri->skb);
4574         ri->skb = NULL;
4575 }
4576
4577 /* Returns size of skb allocated or < 0 on error.
4578  *
4579  * We only need to fill in the address because the other members
4580  * of the RX descriptor are invariant, see tg3_init_rings.
4581  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4583  * posting buffers we only dirty the first cache line of the RX
4584  * descriptor (containing the address).  Whereas for the RX status
4585  * buffers the cpu only reads the last cacheline of the RX descriptor
4586  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4587  */
/* Allocate and DMA-map a fresh rx skb for slot @dest_idx_unmasked of
 * the std or jumbo producer ring in @tpr (selected by @opaque_key).
 * Returns the mapped buffer size on success, -EINVAL for an unknown
 * ring, -ENOMEM on allocation failure, -EIO on mapping failure.  On
 * failure the ring slot is left completely untouched; callers rely on
 * that (see the comment in the body).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;

        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
                skb_size = tp->rx_pkt_map_sz;
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
                skb_size = TG3_RX_JMB_MAP_SZ;
                break;

        default:
                return -EINVAL;
        }

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
        if (skb == NULL)
                return -ENOMEM;

        skb_reserve(skb, tp->rx_offset);

        mapping = pci_map_single(tp->pdev, skb->data, skb_size,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* Commit: record the skb and publish the DMA address to the
         * hardware descriptor as hi/lo 32-bit halves.
         */
        map->skb = skb;
        dma_unmap_addr_set(map, mapping, mapping);

        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return skb_size;
}
4643
4644 /* We only need to move over in the address because the other
4645  * members of the RX descriptor are invariant.  See notes above
4646  * tg3_alloc_rx_skb for full details.
4647  */
/* Transfer the skb and DMA mapping of slot src_idx in the master
 * (napi[0]) producer ring into slot dest_idx_unmasked of the
 * destination ring set dpr, then release the source slot.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring type: nothing to recycle. */
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
4693
4694 /* The RX ring scheme is composed of multiple rings which post fresh
4695  * buffers to the chip, and one special ring the chip uses to report
4696  * status back to the host.
4697  *
4698  * The special ring reports the status of received packets to the
4699  * host.  The chip does not write into the original descriptor the
4700  * RX buffer was obtained from.  The chip simply takes the original
4701  * descriptor as provided by the host, updates the status and length
4702  * field, then writes this into the next status ring entry.
4703  *
4704  * Each ring the host uses to post buffers to the chip is described
4705  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4706  * it is first placed into the on-chip ram.  When the packet's length
4707  * is known, it walks down the TG3_BDINFO entries to select the ring.
4708  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4709  * which is within the range of the new packet's length is chosen.
4710  *
4711  * The "separate ring for rx status" scheme may sound queer, but it makes
4712  * sense from a cache coherency perspective.  If only the host writes
4713  * to the buffer post rings, and only the chip writes to the rx status
4714  * rings, then cache lines never move beyond shared-modified state.
4715  * If both the host and chip were to write into the same ring, cache line
4716  * eviction could occur since both entities want it in an exclusive state.
4717  */
/* Service the rx return (status) ring for one NAPI vector: walk entries
 * from the driver's consumer index up to the hardware producer index,
 * deliver good packets via GRO within the NAPI budget, recycle or drop
 * bad ones, and replenish the producer ring(s).  Returns the number of
 * packets delivered.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring and
		 * which slot this buffer originally came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Any error flag other than an odd-nibble MII receive
		 * drops the frame; the buffer itself can be reused.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: hand the existing buffer up the
			 * stack and post a freshly allocated one.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			/* Small frame: copy into a new skb and recycle
			 * the original ring buffer in place.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the full
		 * TCP/UDP csum field reads back 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Oversized non-VLAN frames are dropped here. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Tell the chip about replenished std buffers early if
		 * we have posted a large batch since the last update.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] is responsible for transferring the
		 * recycled buffers back to the master rings.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
4900
/* Check the status block for a pending link-change event and, if one is
 * present, ack it: with phylib the MAC status bits are simply cleared,
 * otherwise the driver runs its own PHY setup.  Both happen under
 * tp->lock.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * rest of the status word.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
4926
/* Move recycled rx buffers from a per-vector producer ring set (spr)
 * back into the destination ring set (dpr), for both the standard and
 * jumbo rings.  Copies as many contiguous entries as possible per pass,
 * stopping early if a destination slot is still occupied.  Returns 0 on
 * success or -ENOSPC if any destination ring was found full.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy up to the producer index, or to the ring wrap
		 * point, whichever comes first.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy to stop at the first destination slot
		 * that still holds an skb.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same transfer loop, for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5052
/* Core per-vector poll work: reap tx completions, receive packets
 * within the remaining NAPI budget, and — when RSS is enabled and this
 * is napi[1] — funnel buffers recycled by all other vectors back into
 * the napi[0] producer rings.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order the buffer-ring writes before the producer
		 * mailbox updates below.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A full destination ring means buffers are still
		 * stranded; kick the coalescing engine to retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5099
/* NAPI poll routine for MSI-X vectors other than vector 0: no link
 * handling, tagged-status interrupt re-enable only.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5143
/* NAPI poll routine for vector 0: also polls for link changes and
 * supports both tagged and non-tagged status block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5188
5189 static void tg3_napi_disable(struct tg3 *tp)
5190 {
5191         int i;
5192
5193         for (i = tp->irq_cnt - 1; i >= 0; i--)
5194                 napi_disable(&tp->napi[i].napi);
5195 }
5196
5197 static void tg3_napi_enable(struct tg3 *tp)
5198 {
5199         int i;
5200
5201         for (i = 0; i < tp->irq_cnt; i++)
5202                 napi_enable(&tp->napi[i].napi);
5203 }
5204
5205 static void tg3_napi_init(struct tg3 *tp)
5206 {
5207         int i;
5208
5209         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5210         for (i = 1; i < tp->irq_cnt; i++)
5211                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5212 }
5213
5214 static void tg3_napi_fini(struct tg3 *tp)
5215 {
5216         int i;
5217
5218         for (i = 0; i < tp->irq_cnt; i++)
5219                 netif_napi_del(&tp->napi[i].napi);
5220 }
5221
/* Stop all rx/tx processing: refresh trans_start so the tx watchdog
 * does not fire while the queues are deliberately stopped, then shut
 * down NAPI and disable the tx queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5228
/* Restart rx/tx processing after a stop: wake the tx queues, re-enable
 * NAPI, force a status-block service pass, and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Setting SD_STATUS_UPDATED makes the next poll see work. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5241
/* Mark interrupts as being synchronized and wait for any handler
 * already running on another CPU to finish.  Must not be called with
 * irq_sync already set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handlers. */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5254
5255 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5256  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5257  * with as well.  Most of the time, this is not necessary except when
5258  * shutting down the device.
5259  */
/* Take tp->lock (bh-disabled); optionally also quiesce the IRQ
 * handlers — see the comment above.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5266
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5271
5272 /* One-shot MSI handler - Chip automatically disables interrupt
5273  * after sending MSI so driver doesn't have to do it.
5274  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and next rx return entry for the poll. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling if the driver is quiescing interrupts. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
5289
5290 /* MSI ISR - No need to check for interrupt sharing and no need to
5291  * flush status block and interrupt mailbox. PCI ordering rules
5292  * guarantee that MSI will arrive after the status block.
5293  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and next rx return entry for the poll. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
5315
/* Legacy INTx interrupt handler (non-tagged status block mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5364
/* Legacy INTx interrupt handler, tagged status block mode: uses the
 * status tag instead of SD_STATUS_UPDATED to detect new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5416
5417 /* ISR for interrupt test */
5418 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5419 {
5420         struct tg3_napi *tnapi = dev_id;
5421         struct tg3 *tp = tnapi->tp;
5422         struct tg3_hw_status *sblk = tnapi->hw_status;
5423
5424         if ((sblk->status & SD_STATUS_UPDATED) ||
5425             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5426                 tg3_disable_ints(tp);
5427                 return IRQ_RETVAL(1);
5428         }
5429         return IRQ_RETVAL(0);
5430 }
5431
5432 static int tg3_init_hw(struct tg3 *, int);
5433 static int tg3_halt(struct tg3 *, int, int);
5434
5435 /* Restart hardware after configuration changes, self-test, etc.
5436  * Invoked with tp->lock held.
5437  */
5438 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5439         __releases(tp->lock)
5440         __acquires(tp->lock)
5441 {
5442         int err;
5443
5444         err = tg3_init_hw(tp, reset_phy);
5445         if (err) {
5446                 netdev_err(tp->dev,
5447                            "Failed to re-initialize device, aborting\n");
5448                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5449                 tg3_full_unlock(tp);
5450                 del_timer_sync(&tp->timer);
5451                 tp->irq_sync = 0;
5452                 tg3_napi_enable(tp);
5453                 dev_close(tp->dev);
5454                 tg3_full_lock(tp, 0);
5455         }
5456         return err;
5457 }
5458
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with real interrupts unusable, manually invoke the ISR
 * for every interrupt vector this device has registered.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
5469
/* Workqueue handler that fully halts and re-initializes the chip.
 * Scheduled from tg3_tx_timeout() (and wherever else tp->reset_task is
 * queued); runs in process context so it may sleep and take tp->lock.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* PHY stop is done outside the full lock, before quiescing the
	 * data path below.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Consume the one-shot "re-arm the timer after reset" request. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: switch to flushed mailbox writes and mark
		 * mailbox write-reordering from here on — presumably a
		 * workaround for the condition that hung TX; confirm
		 * against where TX_RECOVERY_PENDING is set.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if hardware init succeeded. */
	if (!err)
		tg3_phy_start(tp);
}
5517
5518 static void tg3_dump_short_state(struct tg3 *tp)
5519 {
5520         netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5521                    tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5522         netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5523                    tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5524 }
5525
5526 static void tg3_tx_timeout(struct net_device *dev)
5527 {
5528         struct tg3 *tp = netdev_priv(dev);
5529
5530         if (netif_msg_tx_err(tp)) {
5531                 netdev_err(dev, "transmit timed out, resetting\n");
5532                 tg3_dump_short_state(tp);
5533         }
5534
5535         schedule_work(&tp->reset_task);
5536 }
5537
5538 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5539 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5540 {
5541         u32 base = (u32) mapping & 0xffffffff;
5542
5543         return (base > 0xffffdcc0) && (base + len + 8 < base);
5544 }
5545
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips flagged with the 40-bit DMA erratum need the check.
	 * NOTE(review): the check is compiled only for 64-bit highmem
	 * configs — presumably the only setups where mappings beyond
	 * DMA_BIT_MASK(40) can occur; confirm against the flag's setters.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
5558
5559 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5560
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize @skb into a freshly allocated copy whose single DMA mapping
 * avoids the hardware's boundary bugs, install the copy at descriptor
 * slot *@start, and unmap/clear the old SW ring entries from *@start up
 * to @last_plus_one.  The original @skb is always freed.  Returns 0 on
 * success, -1 when the copy/mapping fails or still crosses a 4G line
 * (the packet is dropped in that case).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* Extra headroom so the copied data can sit 4-byte
		 * aligned — NOTE(review): presumably a 5701 alignment
		 * requirement; confirm against the chip errata.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* One descriptor covers the whole linear copy;
			 * mark it as the last fragment (bit 0).
			 */
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	/* Walk the old entries: unmap each (head first, then frags) and
	 * park new_skb (or NULL on failure) in the first slot so TX
	 * completion later frees the right buffer.
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
5641
5642 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5643                         dma_addr_t mapping, int len, u32 flags,
5644                         u32 mss_and_is_end)
5645 {
5646         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5647         int is_end = (mss_and_is_end & 0x1);
5648         u32 mss = (mss_and_is_end >> 1);
5649         u32 vlan_tag = 0;
5650
5651         if (is_end)
5652                 flags |= TXD_FLAG_END;
5653         if (flags & TXD_FLAG_VLAN) {
5654                 vlan_tag = flags >> 16;
5655                 flags &= 0xffff;
5656         }
5657         vlan_tag |= (mss << TXD_MSS_SHIFT);
5658
5659         txd->addr_hi = ((u64) mapping >> 32);
5660         txd->addr_lo = ((u64) mapping & 0xffffffff);
5661         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5662         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5663 }
5664
5665 /* hard_start_xmit for devices that don't have any bugs and
5666  * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5667  */
5668 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5669                                   struct net_device *dev)
5670 {
5671         struct tg3 *tp = netdev_priv(dev);
5672         u32 len, entry, base_flags, mss;
5673         dma_addr_t mapping;
5674         struct tg3_napi *tnapi;
5675         struct netdev_queue *txq;
5676         unsigned int i, last;
5677
5678         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5679         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5680         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5681                 tnapi++;
5682
5683         /* We are running in BH disabled context with netif_tx_lock
5684          * and TX reclaim runs via tp->napi.poll inside of a software
5685          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5686          * no IRQ context deadlocks to worry about either.  Rejoice!
5687          */
5688         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5689                 if (!netif_tx_queue_stopped(txq)) {
5690                         netif_tx_stop_queue(txq);
5691
5692                         /* This is a hard error, log it. */
5693                         netdev_err(dev,
5694                                    "BUG! Tx Ring full when queue awake!\n");
5695                 }
5696                 return NETDEV_TX_BUSY;
5697         }
5698
5699         entry = tnapi->tx_prod;
5700         base_flags = 0;
5701         mss = skb_shinfo(skb)->gso_size;
5702         if (mss) {
5703                 int tcp_opt_len, ip_tcp_len;
5704                 u32 hdrlen;
5705
5706                 if (skb_header_cloned(skb) &&
5707                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5708                         dev_kfree_skb(skb);
5709                         goto out_unlock;
5710                 }
5711
5712                 if (skb_is_gso_v6(skb)) {
5713                         hdrlen = skb_headlen(skb) - ETH_HLEN;
5714                 } else {
5715                         struct iphdr *iph = ip_hdr(skb);
5716
5717                         tcp_opt_len = tcp_optlen(skb);
5718                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5719
5720                         iph->check = 0;
5721                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5722                         hdrlen = ip_tcp_len + tcp_opt_len;
5723                 }
5724
5725                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5726                         mss |= (hdrlen & 0xc) << 12;
5727                         if (hdrlen & 0x10)
5728                                 base_flags |= 0x00000010;
5729                         base_flags |= (hdrlen & 0x3e0) << 5;
5730                 } else
5731                         mss |= hdrlen << 9;
5732
5733                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5734                                TXD_FLAG_CPU_POST_DMA);
5735
5736                 tcp_hdr(skb)->check = 0;
5737
5738         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5739                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5740         }
5741
5742         if (vlan_tx_tag_present(skb))
5743                 base_flags |= (TXD_FLAG_VLAN |
5744                                (vlan_tx_tag_get(skb) << 16));
5745
5746         len = skb_headlen(skb);
5747
5748         /* Queue skb data, a.k.a. the main skb fragment. */
5749         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5750         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5751                 dev_kfree_skb(skb);
5752                 goto out_unlock;
5753         }
5754
5755         tnapi->tx_buffers[entry].skb = skb;
5756         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5757
5758         if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5759             !mss && skb->len > VLAN_ETH_FRAME_LEN)
5760                 base_flags |= TXD_FLAG_JMB_PKT;
5761
5762         tg3_set_txd(tnapi, entry, mapping, len, base_flags,