/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.106"
#define DRV_MODULE_RELDATE      "January 12, 2010"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE 128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
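
/* Illustrative sketch (not part of the driver): because TG3_TX_RING_SIZE
 * is a power of two, NEXT_TX() can advance a ring index with a cheap
 * bitwise AND instead of a hardware modulo, exactly the '% foo' ->
 * '& (foo - 1)' rewrite mentioned above.  Assuming a producer index
 * 'tx_prod', the two forms below are equivalent:
 *
 *      tx_prod = (tx_prod + 1) % TG3_TX_RING_SIZE;     // divide/modulo
 *      tx_prod = NEXT_TX(tx_prod);                     // shift-and-mask
 *
 * e.g. NEXT_TX(511) wraps to 0, since 512 & 0x1ff == 0.
 */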

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
        (sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
        (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
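
/* Worked example (illustrative, not part of the driver): with the default
 * tx_pending of TG3_DEF_TX_RING_PENDING (511), TG3_TX_WAKEUP_THRESH()
 * evaluates to 511 / 4 = 127, so a stopped TX queue is only woken once at
 * least 127 descriptors are free again.  The hysteresis keeps the queue
 * from bouncing between stopped and awake on every reclaimed descriptor.
 */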

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS       2

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
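
/* A note on the indirect access scheme (explanatory, not from the original
 * sources): TG3PCI_REG_BASE_ADDR and TG3PCI_REG_DATA form a register
 * window in PCI config space.  The first config write selects which MAC
 * register the window points at, the second moves the data, and
 * indirect_lock serializes the two-step sequence so concurrent callers
 * cannot interleave their address/data pairs.  The same pattern (address
 * write, then data access, under one lock) is used by the indirect
 * mailbox and memory helpers below.
 */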

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
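
/* Background (explanatory sketch, not from the original sources): on PCI,
 * MMIO writes are "posted" -- writel() may return before the device has
 * seen the value.  Reading the same register back forces the write to
 * complete, which is why the posted path above is essentially:
 *
 *      writel(val, tp->regs + off);    // may be buffered by the bridge
 *      udelay(usec_wait);              // settle time requested by caller
 *      readl(tp->regs + off);          // flushes the posted write
 *
 * Callers reach this through the tw32_f()/tw32_wait_f() macros defined
 * below rather than calling _tw32_flush() directly.
 */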

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg, val)          tp->write32(tp, reg, val)
#define tw32_f(reg, val)        _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)               tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
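
/* Usage sketch (illustrative, not from the original sources): the pair
 * above gives word access to the NIC's on-board SRAM through the memory
 * window at TG3PCI_MEM_WIN_BASE_ADDR.  A typical exchange with the
 * firmware looks like:
 *
 *      u32 val;
 *
 *      tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &val);   // fetch one word
 *      tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);   // post a command
 *
 * The window base is always returned to zero afterwards so a stray access
 * through the window cannot land in an unexpected SRAM region.
 */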

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
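
/* Usage sketch (illustrative, not from the original sources): the APE
 * lock arbitrates shared resources between the driver and the APE
 * management firmware.  Callers bracket the critical section and must
 * tolerate -EBUSY:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return;                 // firmware holds the resource
 *      ... touch the shared resource ...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * Note that both lock and unlock write APE_LOCK_GRANT_DRIVER to the
 * grant register: writing one's own grant bit is how a holder releases,
 * or a loser revokes, its request.
 */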

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;
        u32 coal_now = 0;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending, and can return without flushing the PIO write
 *  that re-enables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
        int i;

        for (i = tp->irq_cnt - 1; i >= 0; i--)
                napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
        int i;

        for (i = 0; i < tp->irq_cnt; i++)
                napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
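
/* Layout note (explanatory, not from the original sources): tg3_readphy()
 * and tg3_writephy() below drive IEEE 802.3 clause 22 MDIO transactions
 * by assembling a frame word in MAC_MI_COM:
 *
 *      frame_val = (phy_addr << MI_COM_PHY_ADDR_SHIFT)  // which PHY
 *                | (reg      << MI_COM_REG_ADDR_SHIFT)  // which register
 *                | (data & MI_COM_DATA_MASK)            // writes only
 *                | MI_COM_CMD_READ or MI_COM_CMD_WRITE
 *                | MI_COM_START;
 *
 * The MAC clears MI_COM_BUSY when the transaction completes; the busy-wait
 * below polls for that with a PHY_BUSY_LOOPS bound so a dead PHY cannot
 * hang the driver.
 */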

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
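
/* Usage sketch (illustrative, not from the original sources): most PHY
 * manipulation in this driver is a read-modify-write built from the two
 * helpers above, with the return values checked so a wedged MDIO bus is
 * noticed.  For example, to restart autonegotiation:
 *
 *      u32 bmcr;
 *
 *      if (!tg3_readphy(tp, MII_BMCR, &bmcr))
 *              tg3_writephy(tp, MII_BMCR,
 *                           bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
 */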

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
        case TG3_PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case TG3_PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                u32 funcnum, is_serdes;

                funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
                if (funcnum)
                        tp->phy_addr = 2;
                else
                        tp->phy_addr = 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case TG3_PHY_ID_BCM50610:
        case TG3_PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case TG3_PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case TG3_PHY_ID_RTL8201E:
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
                break;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}
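
/* Lifecycle note (explanatory, not from the original sources): the mdio
 * bus object follows the standard phylib pattern, and tg3_mdio_fini()
 * unwinds tg3_mdio_init() step for step:
 *
 *      mdiobus_alloc()      ->  mdiobus_free()
 *      mdiobus_register()   ->  mdiobus_unregister()
 *
 * TG3_FLG3_MDIOBUS_INITED guards against double init/teardown, and the
 * error paths in tg3_mdio_init() free exactly what was set up before the
 * failure point.
 */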

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
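
/* Reference (explanatory, not from the original sources): the resolution
 * above follows IEEE 802.3 Annex 28B.  With the local PAUSE/ASYM_PAUSE
 * advertisement bits vs. the link partner's, the outcome is:
 *
 *      local PAUSE  local ASYM  remote PAUSE  remote ASYM  =>  result
 *           1           x            1             x           TX+RX
 *           1           1            0             1           RX only
 *           0           1            1             1           TX only
 *        (anything else)                                       none
 *
 * tg3_advert_flowctrl_1000T()/_1000X() translate the driver's FLOW_CTRL_*
 * wishes into those advertisement bits; mii_resolve_flowctrl_fdx() does
 * the same resolution for copper in the non-SERDES path.
 */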
1332
1333 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1334 {
1335         u8 autoneg;
1336         u8 flowctrl = 0;
1337         u32 old_rx_mode = tp->rx_mode;
1338         u32 old_tx_mode = tp->tx_mode;
1339
1340         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1341                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1342         else
1343                 autoneg = tp->link_config.autoneg;
1344
1345         if (autoneg == AUTONEG_ENABLE &&
1346             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1347                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1348                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1349                 else
1350                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1351         } else
1352                 flowctrl = tp->link_config.flowctrl;
1353
1354         tp->link_config.active_flowctrl = flowctrl;
1355
1356         if (flowctrl & FLOW_CTRL_RX)
1357                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1358         else
1359                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1360
1361         if (old_rx_mode != tp->rx_mode)
1362                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1363
1364         if (flowctrl & FLOW_CTRL_TX)
1365                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1366         else
1367                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1368
1369         if (old_tx_mode != tp->tx_mode)
1370                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1371 }
1372
1373 static void tg3_adjust_link(struct net_device *dev)
1374 {
1375         u8 oldflowctrl, linkmesg = 0;
1376         u32 mac_mode, lcl_adv, rmt_adv;
1377         struct tg3 *tp = netdev_priv(dev);
1378         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1383                                     MAC_MODE_HALF_DUPLEX);
1384
1385         oldflowctrl = tp->link_config.active_flowctrl;
1386
1387         if (phydev->link) {
1388                 lcl_adv = 0;
1389                 rmt_adv = 0;
1390
1391                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1392                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1393                 else if (phydev->speed == SPEED_1000 ||
1394                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1395                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1396                 else
1397                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1398
1399                 if (phydev->duplex == DUPLEX_HALF)
1400                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1401                 else {
1402                         lcl_adv = tg3_advert_flowctrl_1000T(
1403                                   tp->link_config.flowctrl);
1404
1405                         if (phydev->pause)
1406                                 rmt_adv = LPA_PAUSE_CAP;
1407                         if (phydev->asym_pause)
1408                                 rmt_adv |= LPA_PAUSE_ASYM;
1409                 }
1410
1411                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1412         } else
1413                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1414
1415         if (mac_mode != tp->mac_mode) {
1416                 tp->mac_mode = mac_mode;
1417                 tw32_f(MAC_MODE, tp->mac_mode);
1418                 udelay(40);
1419         }
1420
1421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1422                 if (phydev->speed == SPEED_10)
1423                         tw32(MAC_MI_STAT,
1424                              MAC_MI_STAT_10MBPS_MODE |
1425                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1426                 else
1427                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1428         }
1429
1430         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1431                 tw32(MAC_TX_LENGTHS,
1432                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1433                       (6 << TX_LENGTHS_IPG_SHIFT) |
1434                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1435         else
1436                 tw32(MAC_TX_LENGTHS,
1437                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1438                       (6 << TX_LENGTHS_IPG_SHIFT) |
1439                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1440
1441         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1442             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1443             phydev->speed != tp->link_config.active_speed ||
1444             phydev->duplex != tp->link_config.active_duplex ||
1445             oldflowctrl != tp->link_config.active_flowctrl)
1446                 linkmesg = 1;
1447
1448         tp->link_config.active_speed = phydev->speed;
1449         tp->link_config.active_duplex = phydev->duplex;
1450
1451         spin_unlock_bh(&tp->lock);
1452
1453         if (linkmesg)
1454                 tg3_link_report(tp);
1455 }
1456
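/* Attach the MAC to its PHY through phylib and mask the PHY's
 * advertised features down to what the MAC can support.  Calling this
 * again while the PHY is already connected is a no-op.
 */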
1457 static int tg3_phy_init(struct tg3 *tp)
1458 {
1459         struct phy_device *phydev;
1460
1461         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1462                 return 0;
1463
1464         /* Bring the PHY back to a known state. */
1465         tg3_bmcr_reset(tp);
1466
1467         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1468
1469         /* Attach the MAC to the PHY. */
1470         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1471                              phydev->dev_flags, phydev->interface);
1472         if (IS_ERR(phydev)) {
1473                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1474                 return PTR_ERR(phydev);
1475         }
1476
1477         /* Mask with MAC supported features. */
1478         switch (phydev->interface) {
1479         case PHY_INTERFACE_MODE_GMII:
1480         case PHY_INTERFACE_MODE_RGMII:
1481                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1482                         phydev->supported &= (PHY_GBIT_FEATURES |
1483                                               SUPPORTED_Pause |
1484                                               SUPPORTED_Asym_Pause);
1485                         break;
1486                 }
1487                 /* fallthru */
1488         case PHY_INTERFACE_MODE_MII:
1489                 phydev->supported &= (PHY_BASIC_FEATURES |
1490                                       SUPPORTED_Pause |
1491                                       SUPPORTED_Asym_Pause);
1492                 break;
1493         default:
1494                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1495                 return -EINVAL;
1496         }
1497
1498         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1499
1500         phydev->advertising = phydev->supported;
1501
1502         return 0;
1503 }
1504
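/* Restart the PHY after tg3_phy_init(): restore any link parameters
 * saved when the PHY was placed in low-power mode, then let phylib
 * resume the state machine and renegotiate.
 */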
1505 static void tg3_phy_start(struct tg3 *tp)
1506 {
1507         struct phy_device *phydev;
1508
1509         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1510                 return;
1511
1512         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1513
1514         if (tp->link_config.phy_is_low_power) {
1515                 tp->link_config.phy_is_low_power = 0;
1516                 phydev->speed = tp->link_config.orig_speed;
1517                 phydev->duplex = tp->link_config.orig_duplex;
1518                 phydev->autoneg = tp->link_config.orig_autoneg;
1519                 phydev->advertising = tp->link_config.orig_advertising;
1520         }
1521
1522         phy_start(phydev);
1523
1524         phy_start_aneg(phydev);
1525 }
1526
1527 static void tg3_phy_stop(struct tg3 *tp)
1528 {
1529         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1530                 return;
1531
1532         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1533 }
1534
1535 static void tg3_phy_fini(struct tg3 *tp)
1536 {
1537         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1538                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1539                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1540         }
1541 }
1542
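/* DSP coefficients are reached indirectly: latch the target register
 * into MII_TG3_DSP_ADDRESS, then write the value through the
 * MII_TG3_DSP_RW_PORT data window.
 */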
1543 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1544 {
1545         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1546         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1547 }
1548
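/* Toggle auto power-down on FET-style PHYs.  The APD bit lives in a
 * shadow register: expose the shadow bank via MII_TG3_FET_TEST,
 * read-modify-write AUXSTAT2, then restore the original test register.
 */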
1549 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1550 {
1551         u32 phytest;
1552
1553         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1554                 u32 phy;
1555
1556                 tg3_writephy(tp, MII_TG3_FET_TEST,
1557                              phytest | MII_TG3_FET_SHADOW_EN);
1558                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1559                         if (enable)
1560                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1561                         else
1562                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1563                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1564                 }
1565                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1566         }
1567 }
1568
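/* Enable or disable PHY auto power-down (APD).  FET-style PHYs are
 * handled by tg3_phy_fet_toggle_apd(); everything else is programmed
 * through the MII_TG3_MISC_SHDW shadow registers (SCR5 first, then
 * the APD select with an 84ms wake timer).
 */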
1569 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1570 {
1571         u32 reg;
1572
1573         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1574             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1575              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1576                 return;
1577
1578         if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1579                 tg3_phy_fet_toggle_apd(tp, enable);
1580                 return;
1581         }
1582
1583         reg = MII_TG3_MISC_SHDW_WREN |
1584               MII_TG3_MISC_SHDW_SCR5_SEL |
1585               MII_TG3_MISC_SHDW_SCR5_LPED |
1586               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1587               MII_TG3_MISC_SHDW_SCR5_SDTL |
1588               MII_TG3_MISC_SHDW_SCR5_C125OE;
1589         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1590                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1591
1592         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1593
1595         reg = MII_TG3_MISC_SHDW_WREN |
1596               MII_TG3_MISC_SHDW_APD_SEL |
1597               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1598         if (enable)
1599                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1600
1601         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1602 }
1603
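/* Force or release automatic MDI/MDI-X crossover.  FET PHYs use the
 * MISCCTRL shadow register; other PHYs do a read-modify-write of the
 * misc shadow bank in MII_TG3_AUX_CTRL.  Serdes devices have no
 * copper pairs to swap and are skipped.
 */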
1604 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1605 {
1606         u32 phy;
1607
1608         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1609             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1610                 return;
1611
1612         if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1613                 u32 ephy;
1614
1615                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1616                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1617
1618                         tg3_writephy(tp, MII_TG3_FET_TEST,
1619                                      ephy | MII_TG3_FET_SHADOW_EN);
1620                         if (!tg3_readphy(tp, reg, &phy)) {
1621                                 if (enable)
1622                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1623                                 else
1624                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1625                                 tg3_writephy(tp, reg, phy);
1626                         }
1627                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1628                 }
1629         } else {
1630                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1631                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1632                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1633                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1634                         if (enable)
1635                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1636                         else
1637                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1638                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1639                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1640                 }
1641         }
1642 }
1643
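/* Enable Broadcom's "ethernet@wirespeed" feature, which (per the
 * vendor documentation) lets the link fall back to a lower speed over
 * marginal cabling.  The 0x7007 write selects the relevant shadow
 * register in MII_TG3_AUX_CTRL before the read-modify-write.
 */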
1644 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1645 {
1646         u32 val;
1647
1648         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1649                 return;
1650
1651         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1652             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1653                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1654                              (val | (1 << 15) | (1 << 4)));
1655 }
1656
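/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * fuse data cached in tp->phy_otp.  The SM_DSP clock must be running
 * while the taps are written, so it is switched on first and back off
 * at the end.
 */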
1657 static void tg3_phy_apply_otp(struct tg3 *tp)
1658 {
1659         u32 otp, phy;
1660
1661         if (!tp->phy_otp)
1662                 return;
1663
1664         otp = tp->phy_otp;
1665
1666         /* Enable SM_DSP clock and tx 6dB coding. */
1667         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1668               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1669               MII_TG3_AUXCTL_ACTL_TX_6DB;
1670         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1671
1672         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1673         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1674         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1675
1676         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1677               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1678         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1679
1680         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1681         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1682         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1683
1684         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1685         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1686
1687         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1688         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1689
1690         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1691               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1692         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1693
1694         /* Turn off SM_DSP clock. */
1695         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1696               MII_TG3_AUXCTL_ACTL_TX_6DB;
1697         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1698 }
1699
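/* Poll until the PHY's DSP macro operation completes, signalled by
 * bit 12 of register 0x16 (presumably a DSP control/status register)
 * going clear.  Returns -EBUSY on timeout.
 */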
1700 static int tg3_wait_macro_done(struct tg3 *tp)
1701 {
1702         int limit = 100;
1703
1704         while (limit--) {
1705                 u32 tmp32;
1706
1707                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1708                         if ((tmp32 & 0x1000) == 0)
1709                                 break;
1710                 }
1711         }
1712         if (limit < 0)
1713                 return -EBUSY;
1714
1715         return 0;
1716 }
1717
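/* Write a known test pattern into each of the four DSP channels and
 * read it back for verification.  On a mismatch or macro timeout the
 * caller is asked, via *resetp, to reset the PHY and try again.
 */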
1718 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1719 {
1720         static const u32 test_pat[4][6] = {
1721         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1722         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1723         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1724         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1725         };
1726         int chan;
1727
1728         for (chan = 0; chan < 4; chan++) {
1729                 int i;
1730
1731                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1732                              (chan * 0x2000) | 0x0200);
1733                 tg3_writephy(tp, 0x16, 0x0002);
1734
1735                 for (i = 0; i < 6; i++)
1736                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1737                                      test_pat[chan][i]);
1738
1739                 tg3_writephy(tp, 0x16, 0x0202);
1740                 if (tg3_wait_macro_done(tp)) {
1741                         *resetp = 1;
1742                         return -EBUSY;
1743                 }
1744
1745                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1746                              (chan * 0x2000) | 0x0200);
1747                 tg3_writephy(tp, 0x16, 0x0082);
1748                 if (tg3_wait_macro_done(tp)) {
1749                         *resetp = 1;
1750                         return -EBUSY;
1751                 }
1752
1753                 tg3_writephy(tp, 0x16, 0x0802);
1754                 if (tg3_wait_macro_done(tp)) {
1755                         *resetp = 1;
1756                         return -EBUSY;
1757                 }
1758
1759                 for (i = 0; i < 6; i += 2) {
1760                         u32 low, high;
1761
1762                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1763                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1764                             tg3_wait_macro_done(tp)) {
1765                                 *resetp = 1;
1766                                 return -EBUSY;
1767                         }
1768                         low &= 0x7fff;
1769                         high &= 0x000f;
1770                         if (low != test_pat[chan][i] ||
1771                             high != test_pat[chan][i+1]) {
1772                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1773                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1774                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1775
1776                                 return -EBUSY;
1777                         }
1778                 }
1779         }
1780
1781         return 0;
1782 }
1783
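/* Clear the test pattern memory of all four DSP channels, undoing the
 * writes done by tg3_phy_write_and_check_testpat().
 */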
1784 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1785 {
1786         int chan;
1787
1788         for (chan = 0; chan < 4; chan++) {
1789                 int i;
1790
1791                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1792                              (chan * 0x2000) | 0x0200);
1793                 tg3_writephy(tp, 0x16, 0x0002);
1794                 for (i = 0; i < 6; i++)
1795                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1796                 tg3_writephy(tp, 0x16, 0x0202);
1797                 if (tg3_wait_macro_done(tp))
1798                         return -EBUSY;
1799         }
1800
1801         return 0;
1802 }
1803
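/* PHY reset workaround for the 5703/5704/5705: force 1000Mbps
 * full-duplex master mode and rewrite the DSP channel test patterns
 * until they verify, retrying with a fresh BMCR reset up to ten times.
 */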
1804 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1805 {
1806         u32 reg32, phy9_orig = 0;  /* zeroed in case every MII read fails */
1807         int retries, do_phy_reset, err;
1808
1809         retries = 10;
1810         do_phy_reset = 1;
1811         do {
1812                 if (do_phy_reset) {
1813                         err = tg3_bmcr_reset(tp);
1814                         if (err)
1815                                 return err;
1816                         do_phy_reset = 0;
1817                 }
1818
1819                 /* Disable transmitter and interrupt.  */
1820                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1821                         continue;
1822
1823                 reg32 |= 0x3000;
1824                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1825
1826                 /* Set full-duplex, 1000 mbps.  */
1827                 tg3_writephy(tp, MII_BMCR,
1828                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1829
1830                 /* Set to master mode.  */
1831                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1832                         continue;
1833
1834                 tg3_writephy(tp, MII_TG3_CTRL,
1835                              (MII_TG3_CTRL_AS_MASTER |
1836                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1837
1838                 /* Enable SM_DSP_CLOCK and 6dB.  */
1839                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1840
1841                 /* Block the PHY control access.  */
1842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1843                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1844
1845                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1846                 if (!err)
1847                         break;
1848         } while (--retries);
1849
1850         err = tg3_phy_reset_chanpat(tp);
1851         if (err)
1852                 return err;
1853
1854         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1855         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1856
1857         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1858         tg3_writephy(tp, 0x16, 0x0000);
1859
1860         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1861             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1862                 /* Set Extended packet length bit for jumbo frames */
1863                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1864         }
1865         } else {
1867         }
1868
1869         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1870
1871         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1872                 reg32 &= ~0x3000;
1873                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1874         } else if (!err)
1875                 err = -EBUSY;
1876
1877         return err;
1878 }
1879
1880 /* Reset the tigon3 PHY and re-apply chip-specific workarounds.  If
1881  * the device was running with a valid link, the lost link is reported.
1882  */
1883 static int tg3_phy_reset(struct tg3 *tp)
1884 {
1885         u32 cpmuctrl;
1886         u32 phy_status;
1887         int err;
1888
1889         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1890                 u32 val;
1891
1892                 val = tr32(GRC_MISC_CFG);
1893                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1894                 udelay(40);
1895         }
1896         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1897         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1898         if (err != 0)
1899                 return -EBUSY;
1900
1901         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1902                 netif_carrier_off(tp->dev);
1903                 tg3_link_report(tp);
1904         }
1905
1906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1909                 err = tg3_phy_reset_5703_4_5(tp);
1910                 if (err)
1911                         return err;
1912                 goto out;
1913         }
1914
1915         cpmuctrl = 0;
1916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1917             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1918                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1919                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1920                         tw32(TG3_CPMU_CTRL,
1921                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1922         }
1923
1924         err = tg3_bmcr_reset(tp);
1925         if (err)
1926                 return err;
1927
1928         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1929                 u32 phy;
1930
1931                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1932                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1933
1934                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1935         }
1936
1937         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1938             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1939                 u32 val;
1940
1941                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1942                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1943                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1944                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1945                         udelay(40);
1946                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1947                 }
1948         }
1949
1950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1951             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1952                 return 0;
1953
1954         tg3_phy_apply_otp(tp);
1955
1956         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1957                 tg3_phy_toggle_apd(tp, true);
1958         else
1959                 tg3_phy_toggle_apd(tp, false);
1960
1961 out:
1962         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1964                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1965                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1966                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1967                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1968                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1969         }
1970         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1971                 tg3_writephy(tp, 0x1c, 0x8d68);
1972                 tg3_writephy(tp, 0x1c, 0x8d68);
1973         }
1974         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1975                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1976                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1977                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1978                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1979                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1982                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1983         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1985                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1986                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1987                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1988                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1989                         tg3_writephy(tp, MII_TG3_TEST1,
1990                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1991                 } else
1992                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1993                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1994         }
1995         /* Set Extended packet length bit (bit 14) on all chips
1996          * that support jumbo frames. */
1997         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1998                 /* Cannot do read-modify-write on 5401 */
1999                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2000         } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2001                 u32 phy_reg;
2002
2003                 /* Set bit 14 with read-modify-write to preserve other bits */
2004                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2005                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
2006                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
2007         }
2008
2009         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2010          * jumbo frames transmission.
2011          */
2012         if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2013                 u32 phy_reg;
2014
2015                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2016                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2017                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2018         }
2019
2020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2021                 /* adjust output voltage */
2022                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2023         }
2024
2025         tg3_phy_toggle_automdix(tp, 1);
2026         tg3_phy_set_wirespeed(tp);
2027         return 0;
2028 }
2029
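/* Switch the auxiliary (Vaux) power GPIOs on or off depending on
 * whether WOL or ASF needs standby power.  On multi-function devices
 * (5704, 5714, 5717) the peer port's flags are honoured too, since
 * both functions drive the same GPIOs.
 */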
2030 static void tg3_frob_aux_power(struct tg3 *tp)
2031 {
2032         struct tg3 *tp_peer = tp;
2033
2034         /* The GPIOs do something completely different on 57765. */
2035         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2037                 return;
2038
2039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2041             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2042                 struct net_device *dev_peer;
2043
2044                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2045                 /* remove_one() may have been run on the peer. */
2046                 if (!dev_peer)
2047                         tp_peer = tp;
2048                 else
2049                         tp_peer = netdev_priv(dev_peer);
2050         }
2051
2052         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2053             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2054             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2055             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2056                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2057                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2058                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2059                                     (GRC_LCLCTRL_GPIO_OE0 |
2060                                      GRC_LCLCTRL_GPIO_OE1 |
2061                                      GRC_LCLCTRL_GPIO_OE2 |
2062                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2063                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2064                                     100);
2065                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2066                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2067                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2068                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2069                                              GRC_LCLCTRL_GPIO_OE1 |
2070                                              GRC_LCLCTRL_GPIO_OE2 |
2071                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2072                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2073                                              tp->grc_local_ctrl;
2074                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2075
2076                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2077                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2078
2079                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2080                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2081                 } else {
2082                         u32 no_gpio2;
2083                         u32 grc_local_ctrl = 0;
2084
2085                         if (tp_peer != tp &&
2086                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2087                                 return;
2088
2089                         /* Workaround to prevent drawing too much current. */
2090                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2091                             ASIC_REV_5714) {
2092                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2093                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2094                                             grc_local_ctrl, 100);
2095                         }
2096
2097                         /* On 5753 and variants, GPIO2 cannot be used. */
2098                         no_gpio2 = tp->nic_sram_data_cfg &
2099                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2100
2101                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2102                                          GRC_LCLCTRL_GPIO_OE1 |
2103                                          GRC_LCLCTRL_GPIO_OE2 |
2104                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2105                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2106                         if (no_gpio2) {
2107                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2108                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2109                         }
2110                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2111                                                     grc_local_ctrl, 100);
2112
2113                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2114
2115                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2116                                                     grc_local_ctrl, 100);
2117
2118                         if (!no_gpio2) {
2119                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2120                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2121                                             grc_local_ctrl, 100);
2122                         }
2123                 }
2124         } else {
2125                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2126                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2127                         if (tp_peer != tp &&
2128                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2129                                 return;
2130
2131                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2132                                     (GRC_LCLCTRL_GPIO_OE1 |
2133                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2134
2135                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2136                                     GRC_LCLCTRL_GPIO_OE1, 100);
2137
2138                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2139                                     (GRC_LCLCTRL_GPIO_OE1 |
2140                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2141                 }
2142         }
2143 }
2144
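/* Return whether MAC_MODE_LINK_POLARITY should be set for the given
 * speed on 5700-class devices; the answer depends on the LED mode and
 * on whether a BCM5411 PHY is fitted.
 */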
2145 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2146 {
2147         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2148                 return 1;
2149         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2150                 if (speed != SPEED_10)
2151                         return 1;
2152         } else if (speed == SPEED_10)
2153                 return 1;
2154
2155         return 0;
2156 }
2157
2158 static int tg3_setup_phy(struct tg3 *, int);
2159
2160 #define RESET_KIND_SHUTDOWN     0
2161 #define RESET_KIND_INIT         1
2162 #define RESET_KIND_SUSPEND      2
2163
2164 static void tg3_write_sig_post_reset(struct tg3 *, int);
2165 static int tg3_halt_cpu(struct tg3 *, u32);
2166
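/* Drop the PHY into its lowest safe power state before suspend.
 * Serdes, 5906 and FET-style PHYs each need their own sequence, and
 * some chips must skip the final BMCR_PDOWN entirely because of
 * hardware bugs (see the checks below).
 */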
2167 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2168 {
2169         u32 val;
2170
2171         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2172                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2173                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2174                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2175
2176                         sg_dig_ctrl |=
2177                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2178                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2179                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2180                 }
2181                 return;
2182         }
2183
2184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2185                 tg3_bmcr_reset(tp);
2186                 val = tr32(GRC_MISC_CFG);
2187                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2188                 udelay(40);
2189                 return;
2190         } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2191                 u32 phytest;
2192                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2193                         u32 phy;
2194
2195                         tg3_writephy(tp, MII_ADVERTISE, 0);
2196                         tg3_writephy(tp, MII_BMCR,
2197                                      BMCR_ANENABLE | BMCR_ANRESTART);
2198
2199                         tg3_writephy(tp, MII_TG3_FET_TEST,
2200                                      phytest | MII_TG3_FET_SHADOW_EN);
2201                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2202                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2203                                 tg3_writephy(tp,
2204                                              MII_TG3_FET_SHDW_AUXMODE4,
2205                                              phy);
2206                         }
2207                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2208                 }
2209                 return;
2210         } else if (do_low_power) {
2211                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2212                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2213
2214                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2215                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2216                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2217                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2218                              MII_TG3_AUXCTL_PCTL_VREG_11V);
2219         }
2220
2221         /* The PHY should not be powered down on some chips because
2222          * of bugs.
2223          */
2224         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2225             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2226             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2227              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2228                 return;
2229
2230         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2231             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2232                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2233                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2234                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2235                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2236         }
2237
2238         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2239 }
2240
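/* NVRAM access is arbitrated between the host and the on-chip
 * firmware through the SWARB hardware semaphore.  The lock nests:
 * only the outermost tg3_nvram_lock() touches the hardware, and
 * tg3_nvram_unlock() releases it once the count drops back to zero.
 */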
2241 /* tp->lock is held. */
2242 static int tg3_nvram_lock(struct tg3 *tp)
2243 {
2244         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2245                 int i;
2246
2247                 if (tp->nvram_lock_cnt == 0) {
2248                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2249                         for (i = 0; i < 8000; i++) {
2250                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2251                                         break;
2252                                 udelay(20);
2253                         }
2254                         if (i == 8000) {
2255                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2256                                 return -ENODEV;
2257                         }
2258                 }
2259                 tp->nvram_lock_cnt++;
2260         }
2261         return 0;
2262 }
2263
2264 /* tp->lock is held. */
2265 static void tg3_nvram_unlock(struct tg3 *tp)
2266 {
2267         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2268                 if (tp->nvram_lock_cnt > 0)
2269                         tp->nvram_lock_cnt--;
2270                 if (tp->nvram_lock_cnt == 0)
2271                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2272         }
2273 }
2274
2275 /* tp->lock is held. */
2276 static void tg3_enable_nvram_access(struct tg3 *tp)
2277 {
2278         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2279             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2280                 u32 nvaccess = tr32(NVRAM_ACCESS);
2281
2282                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2283         }
2284 }
2285
2286 /* tp->lock is held. */
2287 static void tg3_disable_nvram_access(struct tg3 *tp)
2288 {
2289         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2290             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2291                 u32 nvaccess = tr32(NVRAM_ACCESS);
2292
2293                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2294         }
2295 }
2296
2297 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2298                                         u32 offset, u32 *val)
2299 {
2300         u32 tmp;
2301         int i;
2302
2303         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2304                 return -EINVAL;
2305
2306         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2307                                         EEPROM_ADDR_DEVID_MASK |
2308                                         EEPROM_ADDR_READ);
2309         tw32(GRC_EEPROM_ADDR,
2310              tmp |
2311              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2312              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2313               EEPROM_ADDR_ADDR_MASK) |
2314              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2315
2316         for (i = 0; i < 1000; i++) {
2317                 tmp = tr32(GRC_EEPROM_ADDR);
2318
2319                 if (tmp & EEPROM_ADDR_COMPLETE)
2320                         break;
2321                 msleep(1);
2322         }
2323         if (!(tmp & EEPROM_ADDR_COMPLETE))
2324                 return -EBUSY;
2325
2326         tmp = tr32(GRC_EEPROM_DATA);
2327
2328         /*
2329          * The data will always be opposite the native endian
2330          * format.  Perform a blind byteswap to compensate.
2331          */
2332         *val = swab32(tmp);
2333
2334         return 0;
2335 }
2336
2337 #define NVRAM_CMD_TIMEOUT 10000
2338
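/* Issue a command to the NVRAM interface and poll for completion.
 * Worst-case wait is NVRAM_CMD_TIMEOUT iterations of udelay(10),
 * i.e. roughly 100ms.
 */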
2339 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2340 {
2341         int i;
2342
2343         tw32(NVRAM_CMD, nvram_cmd);
2344         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2345                 udelay(10);
2346                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2347                         udelay(10);
2348                         break;
2349                 }
2350         }
2351
2352         if (i == NVRAM_CMD_TIMEOUT)
2353                 return -EBUSY;
2354
2355         return 0;
2356 }
2357
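/* Convert a linear NVRAM offset into the page-based address expected
 * by Atmel AT45DB0x1B-style flashes, whose page size is not a power
 * of two (typically 264 bytes for these parts, so e.g. offset 1000
 * becomes page 3, byte 208).  All other parts use the offset as-is.
 */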
2358 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2359 {
2360         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2361             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2362             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2363            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2364             (tp->nvram_jedecnum == JEDEC_ATMEL))
2365
2366                 addr = ((addr / tp->nvram_pagesize) <<
2367                         ATMEL_AT45DB0X1B_PAGE_POS) +
2368                        (addr % tp->nvram_pagesize);
2369
2370         return addr;
2371 }
2372
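/* Inverse of tg3_nvram_phys_addr(): map an Atmel page-based address
 * back to a linear offset.
 */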
2373 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2374 {
2375         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2376             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2377             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2378            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2379             (tp->nvram_jedecnum == JEDEC_ATMEL))
2380
2381                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2382                         tp->nvram_pagesize) +
2383                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2384
2385         return addr;
2386 }
2387
2388 /* NOTE: Data read in from NVRAM is byteswapped according to
2389  * the byteswapping settings for all other register accesses.
2390  * tg3 devices are BE devices, so on a BE machine, the data
2391  * returned will be exactly as it is seen in NVRAM.  On a LE
2392  * machine, the 32-bit value will be byteswapped.
2393  */
2394 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2395 {
2396         int ret;
2397
2398         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2399                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2400
2401         offset = tg3_nvram_phys_addr(tp, offset);
2402
2403         if (offset > NVRAM_ADDR_MSK)
2404                 return -EINVAL;
2405
2406         ret = tg3_nvram_lock(tp);
2407         if (ret)
2408                 return ret;
2409
2410         tg3_enable_nvram_access(tp);
2411
2412         tw32(NVRAM_ADDR, offset);
2413         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2414                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2415
2416         if (ret == 0)
2417                 *val = tr32(NVRAM_RDDATA);
2418
2419         tg3_disable_nvram_access(tp);
2420
2421         tg3_nvram_unlock(tp);
2422
2423         return ret;
2424 }
2425
2426 /* Ensures NVRAM data is in bytestream format. */
2427 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2428 {
2429         u32 v;
2430         int res = tg3_nvram_read(tp, offset, &v);
2431         if (!res)
2432                 *val = cpu_to_be32(v);
2433         return res;
2434 }
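
/* Usage sketch (illustrative only, not part of the original driver):
 * fetching the start of NVRAM as a bytestream, e.g. for checksumming:
 *
 *	__be32 buf[4];
 *	int i, err;
 *
 *	for (i = 0; i < 4; i++) {
 *		err = tg3_nvram_read_be32(tp, i * 4, &buf[i]);
 *		if (err)
 *			break;
 *	}
 */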
2435
2436 /* tp->lock is held. */
2437 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2438 {
2439         u32 addr_high, addr_low;
2440         int i;
2441
2442         addr_high = ((tp->dev->dev_addr[0] << 8) |
2443                      tp->dev->dev_addr[1]);
2444         addr_low = ((tp->dev->dev_addr[2] << 24) |
2445                     (tp->dev->dev_addr[3] << 16) |
2446                     (tp->dev->dev_addr[4] <<  8) |
2447                     (tp->dev->dev_addr[5] <<  0));
2448         for (i = 0; i < 4; i++) {
2449                 if (i == 1 && skip_mac_1)
2450                         continue;
2451                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2452                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2453         }
2454
2455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2456             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2457                 for (i = 0; i < 12; i++) {
2458                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2459                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2460                 }
2461         }
2462
2463         addr_high = (tp->dev->dev_addr[0] +
2464                      tp->dev->dev_addr[1] +
2465                      tp->dev->dev_addr[2] +
2466                      tp->dev->dev_addr[3] +
2467                      tp->dev->dev_addr[4] +
2468                      tp->dev->dev_addr[5]) &
2469                 TX_BACKOFF_SEED_MASK;
2470         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2471 }
2472
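/* Move the device into the requested PCI power state.  For D1-D3hot
 * this arms wake-on-LAN if configured, downshifts the PHY, trims the
 * core clocks and finally hands control to the PCI layer; D0 simply
 * restores full power and switches the NIC out of Vaux.
 */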
2473 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2474 {
2475         u32 misc_host_ctrl;
2476         bool device_should_wake, do_low_power;
2477
2478         /* Make sure register accesses (indirect or otherwise)
2479          * will function correctly.
2480          */
2481         pci_write_config_dword(tp->pdev,
2482                                TG3PCI_MISC_HOST_CTRL,
2483                                tp->misc_host_ctrl);
2484
2485         switch (state) {
2486         case PCI_D0:
2487                 pci_enable_wake(tp->pdev, state, false);
2488                 pci_set_power_state(tp->pdev, PCI_D0);
2489
2490                 /* Switch out of Vaux if it is a NIC */
2491                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2492                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2493
2494                 return 0;
2495
2496         case PCI_D1:
2497         case PCI_D2:
2498         case PCI_D3hot:
2499                 break;
2500
2501         default:
2502                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2503                         tp->dev->name, state);
2504                 return -EINVAL;
2505         }
2506
2507         /* Restore the CLKREQ setting. */
2508         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2509                 u16 lnkctl;
2510
2511                 pci_read_config_word(tp->pdev,
2512                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2513                                      &lnkctl);
2514                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2515                 pci_write_config_word(tp->pdev,
2516                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2517                                       lnkctl);
2518         }
2519
2520         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2521         tw32(TG3PCI_MISC_HOST_CTRL,
2522              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2523
2524         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2525                              device_may_wakeup(&tp->pdev->dev) &&
2526                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2527
2528         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2529                 do_low_power = false;
2530                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2531                     !tp->link_config.phy_is_low_power) {
2532                         struct phy_device *phydev;
2533                         u32 phyid, advertising;
2534
2535                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2536
2537                         tp->link_config.phy_is_low_power = 1;
2538
2539                         tp->link_config.orig_speed = phydev->speed;
2540                         tp->link_config.orig_duplex = phydev->duplex;
2541                         tp->link_config.orig_autoneg = phydev->autoneg;
2542                         tp->link_config.orig_advertising = phydev->advertising;
2543
2544                         advertising = ADVERTISED_TP |
2545                                       ADVERTISED_Pause |
2546                                       ADVERTISED_Autoneg |
2547                                       ADVERTISED_10baseT_Half;
2548
2549                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2550                             device_should_wake) {
2551                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2552                                         advertising |=
2553                                                 ADVERTISED_100baseT_Half |
2554                                                 ADVERTISED_100baseT_Full |
2555                                                 ADVERTISED_10baseT_Full;
2556                                 else
2557                                         advertising |= ADVERTISED_10baseT_Full;
2558                         }
2559
2560                         phydev->advertising = advertising;
2561
2562                         phy_start_aneg(phydev);
2563
2564                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2565                         if (phyid != TG3_PHY_ID_BCMAC131) {
2566                                 phyid &= TG3_PHY_OUI_MASK;
2567                                 if (phyid == TG3_PHY_OUI_1 ||
2568                                     phyid == TG3_PHY_OUI_2 ||
2569                                     phyid == TG3_PHY_OUI_3)
2570                                         do_low_power = true;
2571                         }
2572                 }
2573         } else {
2574                 do_low_power = true;
2575
2576                 if (tp->link_config.phy_is_low_power == 0) {
2577                         tp->link_config.phy_is_low_power = 1;
2578                         tp->link_config.orig_speed = tp->link_config.speed;
2579                         tp->link_config.orig_duplex = tp->link_config.duplex;
2580                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2581                 }
2582
2583                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2584                         tp->link_config.speed = SPEED_10;
2585                         tp->link_config.duplex = DUPLEX_HALF;
2586                         tp->link_config.autoneg = AUTONEG_ENABLE;
2587                         tg3_setup_phy(tp, 0);
2588                 }
2589         }
2590
2591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2592                 u32 val;
2593
2594                 val = tr32(GRC_VCPU_EXT_CTRL);
2595                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2596         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2597                 int i;
2598                 u32 val;
2599
2600                 for (i = 0; i < 200; i++) {
2601                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2602                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2603                                 break;
2604                         msleep(1);
2605                 }
2606         }
2607         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2608                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2609                                                      WOL_DRV_STATE_SHUTDOWN |
2610                                                      WOL_DRV_WOL |
2611                                                      WOL_SET_MAGIC_PKT);
2612
2613         if (device_should_wake) {
2614                 u32 mac_mode;
2615
2616                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2617                         if (do_low_power) {
2618                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2619                                 udelay(40);
2620                         }
2621
2622                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2623                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2624                         else
2625                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2626
2627                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2628                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2629                             ASIC_REV_5700) {
2630                                 u32 speed = (tp->tg3_flags &
2631                                              TG3_FLAG_WOL_SPEED_100MB) ?
2632                                              SPEED_100 : SPEED_10;
2633                                 if (tg3_5700_link_polarity(tp, speed))
2634                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2635                                 else
2636                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2637                         }
2638                 } else {
2639                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2640                 }
2641
2642                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2643                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2644
2645                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2646                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2647                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2648                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2649                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2650                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2651
2652                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2653                         mac_mode |= tp->mac_mode &
2654                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2655                         if (mac_mode & MAC_MODE_APE_TX_EN)
2656                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2657                 }
2658
2659                 tw32_f(MAC_MODE, mac_mode);
2660                 udelay(100);
2661
2662                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2663                 udelay(10);
2664         }
2665
2666         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2667             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2668              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2669                 u32 base_val;
2670
2671                 base_val = tp->pci_clock_ctrl;
2672                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2673                              CLOCK_CTRL_TXCLK_DISABLE);
2674
2675                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2676                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2677         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2678                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2679                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2680                 /* do nothing */
2681         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2682                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2683                 u32 newbits1, newbits2;
2684
2685                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2688                                     CLOCK_CTRL_TXCLK_DISABLE |
2689                                     CLOCK_CTRL_ALTCLK);
2690                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2691                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2692                         newbits1 = CLOCK_CTRL_625_CORE;
2693                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2694                 } else {
2695                         newbits1 = CLOCK_CTRL_ALTCLK;
2696                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2697                 }
2698
2699                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2700                             40);
2701
2702                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2703                             40);
2704
2705                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2706                         u32 newbits3;
2707
2708                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2709                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2710                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2711                                             CLOCK_CTRL_TXCLK_DISABLE |
2712                                             CLOCK_CTRL_44MHZ_CORE);
2713                         } else {
2714                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2715                         }
2716
2717                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2718                                     tp->pci_clock_ctrl | newbits3, 40);
2719                 }
2720         }
2721
2722         if (!device_should_wake &&
2723             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2724                 tg3_power_down_phy(tp, do_low_power);
2725
2726         tg3_frob_aux_power(tp);
2727
2728         /* Workaround for unstable PLL clock */
2729         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2730             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2731                 u32 val = tr32(0x7d00);
2732
2733                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2734                 tw32(0x7d00, val);
2735                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2736                         int err;
2737
2738                         err = tg3_nvram_lock(tp);
2739                         tg3_halt_cpu(tp, RX_CPU_BASE);
2740                         if (!err)
2741                                 tg3_nvram_unlock(tp);
2742                 }
2743         }
2744
2745         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2746
2747         if (device_should_wake)
2748                 pci_enable_wake(tp->pdev, state, true);
2749
2750         /* Finally, set the new power state. */
2751         pci_set_power_state(tp->pdev, state);
2752
2753         return 0;
2754 }
2755
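/* Decode the speed/duplex fields of the MII_TG3_AUX_STAT register.
 * FET-style PHYs encode the result differently, hence the special
 * case in the default branch.
 */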
2756 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2757 {
2758         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2759         case MII_TG3_AUX_STAT_10HALF:
2760                 *speed = SPEED_10;
2761                 *duplex = DUPLEX_HALF;
2762                 break;
2763
2764         case MII_TG3_AUX_STAT_10FULL:
2765                 *speed = SPEED_10;
2766                 *duplex = DUPLEX_FULL;
2767                 break;
2768
2769         case MII_TG3_AUX_STAT_100HALF:
2770                 *speed = SPEED_100;
2771                 *duplex = DUPLEX_HALF;
2772                 break;
2773
2774         case MII_TG3_AUX_STAT_100FULL:
2775                 *speed = SPEED_100;
2776                 *duplex = DUPLEX_FULL;
2777                 break;
2778
2779         case MII_TG3_AUX_STAT_1000HALF:
2780                 *speed = SPEED_1000;
2781                 *duplex = DUPLEX_HALF;
2782                 break;
2783
2784         case MII_TG3_AUX_STAT_1000FULL:
2785                 *speed = SPEED_1000;
2786                 *duplex = DUPLEX_FULL;
2787                 break;
2788
2789         default:
2790                 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2791                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2792                                  SPEED_10;
2793                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2794                                   DUPLEX_HALF;
2795                         break;
2796                 }
2797                 *speed = SPEED_INVALID;
2798                 *duplex = DUPLEX_INVALID;
2799                 break;
2800         }
2801 }
2802
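/* Begin link bring-up on a copper PHY: program the 10/100 and
 * 1000BASE-T advertisement registers from tp->link_config, or a
 * reduced power-saving set when entering low-power mode.
 */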
2803 static void tg3_phy_copper_begin(struct tg3 *tp)
2804 {
2805         u32 new_adv;
2806         int i;
2807
2808         if (tp->link_config.phy_is_low_power) {
2809                 /* Entering low power mode.  Disable gigabit and
2810                  * 100baseT advertisements.
2811                  */
2812                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2813
2814                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2815                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2816                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2817                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2818
2819                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2820         } else if (tp->link_config.speed == SPEED_INVALID) {
2821                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2822                         tp->link_config.advertising &=
2823                                 ~(ADVERTISED_1000baseT_Half |
2824                                   ADVERTISED_1000baseT_Full);
2825
2826                 new_adv = ADVERTISE_CSMA;
2827                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2828                         new_adv |= ADVERTISE_10HALF;
2829                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2830                         new_adv |= ADVERTISE_10FULL;
2831                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2832                         new_adv |= ADVERTISE_100HALF;
2833                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2834                         new_adv |= ADVERTISE_100FULL;
2835
2836                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2837
2838                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2839
2840                 if (tp->link_config.advertising &
2841                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2842                         new_adv = 0;
2843                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2844                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2845                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2846                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2847                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2848                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2849                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2850                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2851                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2852                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2853                 } else {
2854                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2855                 }
2856         } else {
2857                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2858                 new_adv |= ADVERTISE_CSMA;
2859
2860                 /* Asking for a specific link mode. */
2861                 if (tp->link_config.speed == SPEED_1000) {
2862                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2863
2864                         if (tp->link_config.duplex == DUPLEX_FULL)
2865                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2866                         else
2867                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2868                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2869                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2870                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2871                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2872                 } else {
2873                         if (tp->link_config.speed == SPEED_100) {
2874                                 if (tp->link_config.duplex == DUPLEX_FULL)
2875                                         new_adv |= ADVERTISE_100FULL;
2876                                 else
2877                                         new_adv |= ADVERTISE_100HALF;
2878                         } else {
2879                                 if (tp->link_config.duplex == DUPLEX_FULL)
2880                                         new_adv |= ADVERTISE_10FULL;
2881                                 else
2882                                         new_adv |= ADVERTISE_10HALF;
2883                         }
2884                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2885
2886                         new_adv = 0;
2887                 }
2888
2889                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2890         }
2891
2892         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2893             tp->link_config.speed != SPEED_INVALID) {
2894                 u32 bmcr, orig_bmcr;
2895
2896                 tp->link_config.active_speed = tp->link_config.speed;
2897                 tp->link_config.active_duplex = tp->link_config.duplex;
2898
2899                 bmcr = 0;
2900                 switch (tp->link_config.speed) {
2901                 default:
2902                 case SPEED_10:
2903                         break;
2904
2905                 case SPEED_100:
2906                         bmcr |= BMCR_SPEED100;
2907                         break;
2908
2909                 case SPEED_1000:
2910                         bmcr |= TG3_BMCR_SPEED1000;
2911                         break;
2912                 }
2913
2914                 if (tp->link_config.duplex == DUPLEX_FULL)
2915                         bmcr |= BMCR_FULLDPLX;
2916
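                /* To force speed/duplex, first drop the link by
                 * putting the PHY into loopback, wait for the link to
                 * go down, then program the real BMCR value.
                 */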
2917                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2918                     (bmcr != orig_bmcr)) {
2919                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2920                         for (i = 0; i < 1500; i++) {
2921                                 u32 tmp;
2922
2923                                 udelay(10);
2924                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2925                                     tg3_readphy(tp, MII_BMSR, &tmp))
2926                                         continue;
2927                                 if (!(tmp & BMSR_LSTATUS)) {
2928                                         udelay(40);
2929                                         break;
2930                                 }
2931                         }
2932                         tg3_writephy(tp, MII_BMCR, bmcr);
2933                         udelay(40);
2934                 }
2935         } else {
2936                 tg3_writephy(tp, MII_BMCR,
2937                              BMCR_ANENABLE | BMCR_ANRESTART);
2938         }
2939 }
2940
2941 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2942 {
2943         int err;
2944
2945         /* Turn off tap power management. */
2946         /* Set Extended packet length bit */
2947         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2948
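        /* The DSP address/value pairs below are undocumented,
         * PHY-specific tuning constants.
         */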
2949         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2950         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2951
2952         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2953         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2954
2955         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2956         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2957
2958         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2959         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2960
2961         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2962         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2963
2964         udelay(40);
2965
2966         return err;
2967 }
2968
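/* Return 1 if the PHY advertisement registers cover every mode in
 * @mask, 0 otherwise (including on a failed PHY read).
 */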
2969 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2970 {
2971         u32 adv_reg, all_mask = 0;
2972
2973         if (mask & ADVERTISED_10baseT_Half)
2974                 all_mask |= ADVERTISE_10HALF;
2975         if (mask & ADVERTISED_10baseT_Full)
2976                 all_mask |= ADVERTISE_10FULL;
2977         if (mask & ADVERTISED_100baseT_Half)
2978                 all_mask |= ADVERTISE_100HALF;
2979         if (mask & ADVERTISED_100baseT_Full)
2980                 all_mask |= ADVERTISE_100FULL;
2981
2982         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2983                 return 0;
2984
2985         if ((adv_reg & all_mask) != all_mask)
2986                 return 0;
2987         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2988                 u32 tg3_ctrl;
2989
2990                 all_mask = 0;
2991                 if (mask & ADVERTISED_1000baseT_Half)
2992                         all_mask |= ADVERTISE_1000HALF;
2993                 if (mask & ADVERTISED_1000baseT_Full)
2994                         all_mask |= ADVERTISE_1000FULL;
2995
2996                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2997                         return 0;
2998
2999                 if ((tg3_ctrl & all_mask) != all_mask)
3000                         return 0;
3001         }
3002         return 1;
3003 }
3004
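/* Check that the advertised pause bits match the requested flow
 * control.  Returns 1 when the autoneg result is usable; returns 0
 * to request renegotiation.  May rewrite MII_ADVERTISE so a future
 * renegotiation starts from the correct advertisement.
 */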
3005 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3006 {
3007         u32 curadv, reqadv;
3008
3009         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3010                 return 1;
3011
3012         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3013         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3014
3015         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3016                 if (curadv != reqadv)
3017                         return 0;
3018
3019                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3020                         tg3_readphy(tp, MII_LPA, rmtadv);
3021         } else {
3022                 /* Reprogram the advertisement register, even if it
3023                  * does not affect the current link.  If the link
3024                  * gets renegotiated in the future, we can save an
3025                  * additional renegotiation cycle by advertising
3026                  * it correctly in the first place.
3027                  */
3028                 if (curadv != reqadv) {
3029                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3030                                      ADVERTISE_PAUSE_ASYM);
3031                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3032                 }
3033         }
3034
3035         return 1;
3036 }
3037
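/* Configure and poll a copper PHY, program the MAC for the resulting
 * speed/duplex and report carrier changes.  Always returns 0.
 */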
3038 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3039 {
3040         int current_link_up;
3041         u32 bmsr, dummy;
3042         u32 lcl_adv, rmt_adv;
3043         u16 current_speed;
3044         u8 current_duplex;
3045         int i, err;
3046
3047         tw32(MAC_EVENT, 0);
3048
3049         tw32_f(MAC_STATUS,
3050              (MAC_STATUS_SYNC_CHANGED |
3051               MAC_STATUS_CFG_CHANGED |
3052               MAC_STATUS_MI_COMPLETION |
3053               MAC_STATUS_LNKSTATE_CHANGED));
3054         udelay(40);
3055
3056         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3057                 tw32_f(MAC_MI_MODE,
3058                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3059                 udelay(80);
3060         }
3061
3062         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3063
3064         /* Some third-party PHYs need to be reset on link going
3065          * down.
3066          */
3067         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3068              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3069              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3070             netif_carrier_ok(tp->dev)) {
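                /* BMSR latches link-down events, so read it twice;
                 * the second read reflects the current link state.
                 */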
3071                 tg3_readphy(tp, MII_BMSR, &bmsr);
3072                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3073                     !(bmsr & BMSR_LSTATUS))
3074                         force_reset = 1;
3075         }
3076         if (force_reset)
3077                 tg3_phy_reset(tp);
3078
3079         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3080                 tg3_readphy(tp, MII_BMSR, &bmsr);
3081                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3082                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3083                         bmsr = 0;
3084
3085                 if (!(bmsr & BMSR_LSTATUS)) {
3086                         err = tg3_init_5401phy_dsp(tp);
3087                         if (err)
3088                                 return err;
3089
3090                         tg3_readphy(tp, MII_BMSR, &bmsr);
3091                         for (i = 0; i < 1000; i++) {
3092                                 udelay(10);
3093                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3094                                     (bmsr & BMSR_LSTATUS)) {
3095                                         udelay(40);
3096                                         break;
3097                                 }
3098                         }
3099
3100                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3101                             !(bmsr & BMSR_LSTATUS) &&
3102                             tp->link_config.active_speed == SPEED_1000) {
3103                                 err = tg3_phy_reset(tp);
3104                                 if (!err)
3105                                         err = tg3_init_5401phy_dsp(tp);
3106                                 if (err)
3107                                         return err;
3108                         }
3109                 }
3110         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3111                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3112                 /* 5701 {A0,B0} CRC bug workaround */
3113                 tg3_writephy(tp, 0x15, 0x0a75);
3114                 tg3_writephy(tp, 0x1c, 0x8c68);
3115                 tg3_writephy(tp, 0x1c, 0x8d68);
3116                 tg3_writephy(tp, 0x1c, 0x8c68);
3117         }
3118
3119         /* Clear pending interrupts... */
3120         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3121         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3122
3123         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3124                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3125         else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3126                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3127
3128         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3129             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3130                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3131                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3132                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3133                 else
3134                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3135         }
3136
3137         current_link_up = 0;
3138         current_speed = SPEED_INVALID;
3139         current_duplex = DUPLEX_INVALID;
3140
3141         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3142                 u32 val;
3143
3144                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3145                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3146                 if (!(val & (1 << 10))) {
3147                         val |= (1 << 10);
3148                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3149                         goto relink;
3150                 }
3151         }
3152
3153         bmsr = 0;
3154         for (i = 0; i < 100; i++) {
3155                 tg3_readphy(tp, MII_BMSR, &bmsr);
3156                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3157                     (bmsr & BMSR_LSTATUS))
3158                         break;
3159                 udelay(40);
3160         }
3161
3162         if (bmsr & BMSR_LSTATUS) {
3163                 u32 aux_stat, bmcr;
3164
3165                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3166                 for (i = 0; i < 2000; i++) {
3167                         udelay(10);
3168                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3169                             aux_stat)
3170                                 break;
3171                 }
3172
3173                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3174                                              &current_speed,
3175                                              &current_duplex);
3176
3177                 bmcr = 0;
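                /* Poll until BMCR returns a plausible value; 0 and
                 * 0x7fff are treated as invalid reads.
                 */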
3178                 for (i = 0; i < 200; i++) {
3179                         tg3_readphy(tp, MII_BMCR, &bmcr);
3180                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3181                                 continue;
3182                         if (bmcr && bmcr != 0x7fff)
3183                                 break;
3184                         udelay(10);
3185                 }
3186
3187                 lcl_adv = 0;
3188                 rmt_adv = 0;
3189
3190                 tp->link_config.active_speed = current_speed;
3191                 tp->link_config.active_duplex = current_duplex;
3192
3193                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3194                         if ((bmcr & BMCR_ANENABLE) &&
3195                             tg3_copper_is_advertising_all(tp,
3196                                                 tp->link_config.advertising)) {
3197                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3198                                                                   &rmt_adv))
3199                                         current_link_up = 1;
3200                         }
3201                 } else {
3202                         if (!(bmcr & BMCR_ANENABLE) &&
3203                             tp->link_config.speed == current_speed &&
3204                             tp->link_config.duplex == current_duplex &&
3205                             tp->link_config.flowctrl ==
3206                             tp->link_config.active_flowctrl) {
3207                                 current_link_up = 1;
3208                         }
3209                 }
3210
3211                 if (current_link_up == 1 &&
3212                     tp->link_config.active_duplex == DUPLEX_FULL)
3213                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3214         }
3215
3216 relink:
3217         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3218                 u32 tmp;
3219
3220                 tg3_phy_copper_begin(tp);
3221
3222                 tg3_readphy(tp, MII_BMSR, &tmp);
3223                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3224                     (tmp & BMSR_LSTATUS))
3225                         current_link_up = 1;
3226         }
3227
3228         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3229         if (current_link_up == 1) {
3230                 if (tp->link_config.active_speed == SPEED_100 ||
3231                     tp->link_config.active_speed == SPEED_10)
3232                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3233                 else
3234                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3235         } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3236                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3237         else
3238                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3239
3240         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3241         if (tp->link_config.active_duplex == DUPLEX_HALF)
3242                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3243
3244         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3245                 if (current_link_up == 1 &&
3246                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3247                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3248                 else
3249                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3250         }
3251
3252         /* ??? Without this setting Netgear GA302T PHY does not
3253          * ??? send/receive packets...
3254          */
3255         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3256             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3257                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3258                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3259                 udelay(80);
3260         }
3261
3262         tw32_f(MAC_MODE, tp->mac_mode);
3263         udelay(40);
3264
3265         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3266                 /* Polled via timer. */
3267                 tw32_f(MAC_EVENT, 0);
3268         } else {
3269                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3270         }
3271         udelay(40);
3272
3273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3274             current_link_up == 1 &&
3275             tp->link_config.active_speed == SPEED_1000 &&
3276             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3277              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3278                 udelay(120);
3279                 tw32_f(MAC_STATUS,
3280                      (MAC_STATUS_SYNC_CHANGED |
3281                       MAC_STATUS_CFG_CHANGED));
3282                 udelay(40);
3283                 tg3_write_mem(tp,
3284                               NIC_SRAM_FIRMWARE_MBOX,
3285                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3286         }
3287
3288         /* Prevent send BD corruption. */
3289         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3290                 u16 oldlnkctl, newlnkctl;
3291
3292                 pci_read_config_word(tp->pdev,
3293                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3294                                      &oldlnkctl);
3295                 if (tp->link_config.active_speed == SPEED_100 ||
3296                     tp->link_config.active_speed == SPEED_10)
3297                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3298                 else
3299                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3300                 if (newlnkctl != oldlnkctl)
3301                         pci_write_config_word(tp->pdev,
3302                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3303                                               newlnkctl);
3304         }
3305
3306         if (current_link_up != netif_carrier_ok(tp->dev)) {
3307                 if (current_link_up)
3308                         netif_carrier_on(tp->dev);
3309                 else
3310                         netif_carrier_off(tp->dev);
3311                 tg3_link_report(tp);
3312         }
3313
3314         return 0;
3315 }
3316
3317 struct tg3_fiber_aneginfo {
3318         int state;
3319 #define ANEG_STATE_UNKNOWN              0
3320 #define ANEG_STATE_AN_ENABLE            1
3321 #define ANEG_STATE_RESTART_INIT         2
3322 #define ANEG_STATE_RESTART              3
3323 #define ANEG_STATE_DISABLE_LINK_OK      4
3324 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3325 #define ANEG_STATE_ABILITY_DETECT       6
3326 #define ANEG_STATE_ACK_DETECT_INIT      7
3327 #define ANEG_STATE_ACK_DETECT           8
3328 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3329 #define ANEG_STATE_COMPLETE_ACK         10
3330 #define ANEG_STATE_IDLE_DETECT_INIT     11
3331 #define ANEG_STATE_IDLE_DETECT          12
3332 #define ANEG_STATE_LINK_OK              13
3333 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3334 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3335
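        /* The MR_* flags below mirror the management variables of the
         * IEEE 802.3 clause 37 autonegotiation state diagram.
         */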
3336         u32 flags;
3337 #define MR_AN_ENABLE            0x00000001
3338 #define MR_RESTART_AN           0x00000002
3339 #define MR_AN_COMPLETE          0x00000004
3340 #define MR_PAGE_RX              0x00000008
3341 #define MR_NP_LOADED            0x00000010
3342 #define MR_TOGGLE_TX            0x00000020
3343 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3344 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3345 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3346 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3347 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3348 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3349 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3350 #define MR_TOGGLE_RX            0x00002000
3351 #define MR_NP_RX                0x00004000
3352
3353 #define MR_LINK_OK              0x80000000
3354
3355         unsigned long link_time, cur_time;
3356
3357         u32 ability_match_cfg;
3358         int ability_match_count;
3359
3360         char ability_match, idle_match, ack_match;
3361
3362         u32 txconfig, rxconfig;
3363 #define ANEG_CFG_NP             0x00000080
3364 #define ANEG_CFG_ACK            0x00000040
3365 #define ANEG_CFG_RF2            0x00000020
3366 #define ANEG_CFG_RF1            0x00000010
3367 #define ANEG_CFG_PS2            0x00000001
3368 #define ANEG_CFG_PS1            0x00008000
3369 #define ANEG_CFG_HD             0x00004000
3370 #define ANEG_CFG_FD             0x00002000
3371 #define ANEG_CFG_INVAL          0x00001f06
3372
3373 };
3374 #define ANEG_OK         0
3375 #define ANEG_DONE       1
3376 #define ANEG_TIMER_ENAB 2
3377 #define ANEG_FAILED     -1
3378
3379 #define ANEG_STATE_SETTLE_TIME  10000
3380
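/* Software 1000BASE-X autonegotiation state machine, modeled on the
 * IEEE 802.3 clause 37 arbitration states.  Each call advances the
 * machine by one tick; the caller loops until ANEG_DONE/ANEG_FAILED.
 */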
3381 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3382                                    struct tg3_fiber_aneginfo *ap)
3383 {
3384         u16 flowctrl;
3385         unsigned long delta;
3386         u32 rx_cfg_reg;
3387         int ret;
3388
3389         if (ap->state == ANEG_STATE_UNKNOWN) {
3390                 ap->rxconfig = 0;
3391                 ap->link_time = 0;
3392                 ap->cur_time = 0;
3393                 ap->ability_match_cfg = 0;
3394                 ap->ability_match_count = 0;
3395                 ap->ability_match = 0;
3396                 ap->idle_match = 0;
3397                 ap->ack_match = 0;
3398         }
3399         ap->cur_time++;
3400
3401         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3402                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3403
3404                 if (rx_cfg_reg != ap->ability_match_cfg) {
3405                         ap->ability_match_cfg = rx_cfg_reg;
3406                         ap->ability_match = 0;
3407                         ap->ability_match_count = 0;
3408                 } else {
3409                         if (++ap->ability_match_count > 1) {
3410                                 ap->ability_match = 1;
3411                                 ap->ability_match_cfg = rx_cfg_reg;
3412                         }
3413                 }
3414                 if (rx_cfg_reg & ANEG_CFG_ACK)
3415                         ap->ack_match = 1;
3416                 else
3417                         ap->ack_match = 0;
3418
3419                 ap->idle_match = 0;
3420         } else {
3421                 ap->idle_match = 1;
3422                 ap->ability_match_cfg = 0;
3423                 ap->ability_match_count = 0;
3424                 ap->ability_match = 0;
3425                 ap->ack_match = 0;
3426
3427                 rx_cfg_reg = 0;
3428         }
3429
3430         ap->rxconfig = rx_cfg_reg;
3431         ret = ANEG_OK;
3432
3433         switch (ap->state) {
3434         case ANEG_STATE_UNKNOWN:
3435                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3436                         ap->state = ANEG_STATE_AN_ENABLE;
3437
3438                 /* fallthru */
3439         case ANEG_STATE_AN_ENABLE:
3440                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3441                 if (ap->flags & MR_AN_ENABLE) {
3442                         ap->link_time = 0;
3443                         ap->cur_time = 0;
3444                         ap->ability_match_cfg = 0;
3445                         ap->ability_match_count = 0;
3446                         ap->ability_match = 0;
3447                         ap->idle_match = 0;
3448                         ap->ack_match = 0;
3449
3450                         ap->state = ANEG_STATE_RESTART_INIT;
3451                 } else {
3452                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3453                 }
3454                 break;
3455
3456         case ANEG_STATE_RESTART_INIT:
3457                 ap->link_time = ap->cur_time;
3458                 ap->flags &= ~(MR_NP_LOADED);
3459                 ap->txconfig = 0;
3460                 tw32(MAC_TX_AUTO_NEG, 0);
3461                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3462                 tw32_f(MAC_MODE, tp->mac_mode);
3463                 udelay(40);
3464
3465                 ret = ANEG_TIMER_ENAB;
3466                 ap->state = ANEG_STATE_RESTART;
3467
3468                 /* fallthru */
3469         case ANEG_STATE_RESTART:
3470                 delta = ap->cur_time - ap->link_time;
3471                 if (delta > ANEG_STATE_SETTLE_TIME) {
3472                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3473                 } else {
3474                         ret = ANEG_TIMER_ENAB;
3475                 }
3476                 break;
3477
3478         case ANEG_STATE_DISABLE_LINK_OK:
3479                 ret = ANEG_DONE;
3480                 break;
3481
3482         case ANEG_STATE_ABILITY_DETECT_INIT:
3483                 ap->flags &= ~(MR_TOGGLE_TX);
3484                 ap->txconfig = ANEG_CFG_FD;
3485                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3486                 if (flowctrl & ADVERTISE_1000XPAUSE)
3487                         ap->txconfig |= ANEG_CFG_PS1;
3488                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3489                         ap->txconfig |= ANEG_CFG_PS2;
3490                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3491                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3492                 tw32_f(MAC_MODE, tp->mac_mode);
3493                 udelay(40);
3494
3495                 ap->state = ANEG_STATE_ABILITY_DETECT;
3496                 break;
3497
3498         case ANEG_STATE_ABILITY_DETECT:
3499                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3500                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3501                 }
3502                 break;
3503
3504         case ANEG_STATE_ACK_DETECT_INIT:
3505                 ap->txconfig |= ANEG_CFG_ACK;
3506                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3507                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3508                 tw32_f(MAC_MODE, tp->mac_mode);
3509                 udelay(40);
3510
3511                 ap->state = ANEG_STATE_ACK_DETECT;
3512
3513                 /* fallthru */
3514         case ANEG_STATE_ACK_DETECT:
3515                 if (ap->ack_match != 0) {
3516                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3517                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3518                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3519                         } else {
3520                                 ap->state = ANEG_STATE_AN_ENABLE;
3521                         }
3522                 } else if (ap->ability_match != 0 &&
3523                            ap->rxconfig == 0) {
3524                         ap->state = ANEG_STATE_AN_ENABLE;
3525                 }
3526                 break;
3527
3528         case ANEG_STATE_COMPLETE_ACK_INIT:
3529                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3530                         ret = ANEG_FAILED;
3531                         break;
3532                 }
3533                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3534                                MR_LP_ADV_HALF_DUPLEX |
3535                                MR_LP_ADV_SYM_PAUSE |
3536                                MR_LP_ADV_ASYM_PAUSE |
3537                                MR_LP_ADV_REMOTE_FAULT1 |
3538                                MR_LP_ADV_REMOTE_FAULT2 |
3539                                MR_LP_ADV_NEXT_PAGE |
3540                                MR_TOGGLE_RX |
3541                                MR_NP_RX);
3542                 if (ap->rxconfig & ANEG_CFG_FD)
3543                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3544                 if (ap->rxconfig & ANEG_CFG_HD)
3545                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3546                 if (ap->rxconfig & ANEG_CFG_PS1)
3547                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3548                 if (ap->rxconfig & ANEG_CFG_PS2)
3549                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3550                 if (ap->rxconfig & ANEG_CFG_RF1)
3551                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3552                 if (ap->rxconfig & ANEG_CFG_RF2)
3553                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3554                 if (ap->rxconfig & ANEG_CFG_NP)
3555                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3556
3557                 ap->link_time = ap->cur_time;
3558
3559                 ap->flags ^= (MR_TOGGLE_TX);
3560                 if (ap->rxconfig & 0x0008)
3561                         ap->flags |= MR_TOGGLE_RX;
3562                 if (ap->rxconfig & ANEG_CFG_NP)
3563                         ap->flags |= MR_NP_RX;
3564                 ap->flags |= MR_PAGE_RX;
3565
3566                 ap->state = ANEG_STATE_COMPLETE_ACK;
3567                 ret = ANEG_TIMER_ENAB;
3568                 break;
3569
3570         case ANEG_STATE_COMPLETE_ACK:
3571                 if (ap->ability_match != 0 &&
3572                     ap->rxconfig == 0) {
3573                         ap->state = ANEG_STATE_AN_ENABLE;
3574                         break;
3575                 }
3576                 delta = ap->cur_time - ap->link_time;
3577                 if (delta > ANEG_STATE_SETTLE_TIME) {
3578                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3579                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3580                         } else {
3581                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3582                                     !(ap->flags & MR_NP_RX)) {
3583                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3584                                 } else {
3585                                         ret = ANEG_FAILED;
3586                                 }
3587                         }
3588                 }
3589                 break;
3590
3591         case ANEG_STATE_IDLE_DETECT_INIT:
3592                 ap->link_time = ap->cur_time;
3593                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3594                 tw32_f(MAC_MODE, tp->mac_mode);
3595                 udelay(40);
3596
3597                 ap->state = ANEG_STATE_IDLE_DETECT;
3598                 ret = ANEG_TIMER_ENAB;
3599                 break;
3600
3601         case ANEG_STATE_IDLE_DETECT:
3602                 if (ap->ability_match != 0 &&
3603                     ap->rxconfig == 0) {
3604                         ap->state = ANEG_STATE_AN_ENABLE;
3605                         break;
3606                 }
3607                 delta = ap->cur_time - ap->link_time;
3608                 if (delta > ANEG_STATE_SETTLE_TIME) {
3609                         /* XXX another gem from the Broadcom driver :( */
3610                         ap->state = ANEG_STATE_LINK_OK;
3611                 }
3612                 break;
3613
3614         case ANEG_STATE_LINK_OK:
3615                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3616                 ret = ANEG_DONE;
3617                 break;
3618
3619         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3620                 /* ??? unimplemented */
3621                 break;
3622
3623         case ANEG_STATE_NEXT_PAGE_WAIT:
3624                 /* ??? unimplemented */
3625                 break;
3626
3627         default:
3628                 ret = ANEG_FAILED;
3629                 break;
3630         }
3631
3632         return ret;
3633 }
3634
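/* Run the software autoneg state machine to completion.  On return,
 * @txflags holds the transmitted config word and @rxflags the MR_*
 * result flags; returns nonzero when a usable link was negotiated.
 */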
3635 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3636 {
3637         int res = 0;
3638         struct tg3_fiber_aneginfo aninfo;
3639         int status = ANEG_FAILED;
3640         unsigned int tick;
3641         u32 tmp;
3642
3643         tw32_f(MAC_TX_AUTO_NEG, 0);
3644
3645         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3646         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3647         udelay(40);
3648
3649         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3650         udelay(40);
3651
3652         memset(&aninfo, 0, sizeof(aninfo));
3653         aninfo.flags |= MR_AN_ENABLE;
3654         aninfo.state = ANEG_STATE_UNKNOWN;
3655         aninfo.cur_time = 0;
3656         tick = 0;
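        /* Crank the state machine for up to ~195 ms (195000 x 1 us). */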
3657         while (++tick < 195000) {
3658                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3659                 if (status == ANEG_DONE || status == ANEG_FAILED)
3660                         break;
3661
3662                 udelay(1);
3663         }
3664
3665         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3666         tw32_f(MAC_MODE, tp->mac_mode);
3667         udelay(40);
3668
3669         *txflags = aninfo.txconfig;
3670         *rxflags = aninfo.flags;
3671
3672         if (status == ANEG_DONE &&
3673             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3674                              MR_LP_ADV_FULL_DUPLEX)))
3675                 res = 1;
3676
3677         return res;
3678 }
3679
3680 static void tg3_init_bcm8002(struct tg3 *tp)
3681 {
3682         u32 mac_status = tr32(MAC_STATUS);
3683         int i;
3684
3685         /* Reset only on first-time init or when we have a link. */
3686         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3687             !(mac_status & MAC_STATUS_PCS_SYNCED))
3688                 return;
3689
3690         /* Set PLL lock range. */
3691         tg3_writephy(tp, 0x16, 0x8007);
3692
3693         /* SW reset */
3694         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3695
3696         /* Wait for reset to complete. */
3697         /* XXX schedule_timeout() ... */
3698         for (i = 0; i < 500; i++)
3699                 udelay(10);
3700
3701         /* Config mode; select PMA/Ch 1 regs. */
3702         tg3_writephy(tp, 0x10, 0x8411);
3703
3704         /* Enable auto-lock and comdet, select txclk for tx. */
3705         tg3_writephy(tp, 0x11, 0x0a10);
3706
3707         tg3_writephy(tp, 0x18, 0x00a0);
3708         tg3_writephy(tp, 0x16, 0x41ff);
3709
3710         /* Assert and deassert POR. */
3711         tg3_writephy(tp, 0x13, 0x0400);
3712         udelay(40);
3713         tg3_writephy(tp, 0x13, 0x0000);
3714
3715         tg3_writephy(tp, 0x11, 0x0a50);
3716         udelay(40);
3717         tg3_writephy(tp, 0x11, 0x0a10);
3718
3719         /* Wait for signal to stabilize */
3720         /* XXX schedule_timeout() ... */
3721         for (i = 0; i < 15000; i++)
3722                 udelay(10);
3723
3724         /* Deselect the channel register so we can read the PHYID
3725          * later.
3726          */
3727         tg3_writephy(tp, 0x10, 0x8011);
3728 }
3729
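/* Fiber link setup using the hardware SG-DIG autonegotiation engine.
 * Returns nonzero when the link is up.
 */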
3730 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3731 {
3732         u16 flowctrl;
3733         u32 sg_dig_ctrl, sg_dig_status;
3734         u32 serdes_cfg, expected_sg_dig_ctrl;
3735         int workaround, port_a;
3736         int current_link_up;
3737
3738         serdes_cfg = 0;
3739         expected_sg_dig_ctrl = 0;
3740         workaround = 0;
3741         port_a = 1;
3742         current_link_up = 0;
3743
3744         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3745             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3746                 workaround = 1;
3747                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3748                         port_a = 0;
3749
3750                 /* Preserve bits 0-11,13,14 (signal pre-emphasis)
3751                  * and bits 20-23 (voltage regulator). */
3752                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3753         }
3754
3755         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3756
3757         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3758                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3759                         if (workaround) {
3760                                 u32 val = serdes_cfg;
3761
3762                                 if (port_a)
3763                                         val |= 0xc010000;
3764                                 else
3765                                         val |= 0x4010000;
3766                                 tw32_f(MAC_SERDES_CFG, val);
3767                         }
3768
3769                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3770                 }
3771                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3772                         tg3_setup_flow_control(tp, 0, 0);
3773                         current_link_up = 1;
3774                 }
3775                 goto out;
3776         }
3777
3778         /* Want auto-negotiation.  */
3779         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3780
3781         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3782         if (flowctrl & ADVERTISE_1000XPAUSE)
3783                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3784         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3785                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3786
3787         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3788                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3789                     tp->serdes_counter &&
3790                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3791                                     MAC_STATUS_RCVD_CFG)) ==
3792                      MAC_STATUS_PCS_SYNCED)) {
3793                         tp->serdes_counter--;
3794                         current_link_up = 1;
3795                         goto out;
3796                 }
3797 restart_autoneg:
3798                 if (workaround)
3799                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3800                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3801                 udelay(5);
3802                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3803
3804                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3805                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3806         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3807                                  MAC_STATUS_SIGNAL_DET)) {
3808                 sg_dig_status = tr32(SG_DIG_STATUS);
3809                 mac_status = tr32(MAC_STATUS);
3810
3811                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3812                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3813                         u32 local_adv = 0, remote_adv = 0;
3814
3815                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3816                                 local_adv |= ADVERTISE_1000XPAUSE;
3817                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3818                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3819
3820                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3821                                 remote_adv |= LPA_1000XPAUSE;
3822                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3823                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3824
3825                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3826                         current_link_up = 1;
3827                         tp->serdes_counter = 0;
3828                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3829                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3830                         if (tp->serdes_counter)
3831                                 tp->serdes_counter--;
3832                         else {
3833                                 if (workaround) {
3834                                         u32 val = serdes_cfg;
3835
3836                                         if (port_a)
3837                                                 val |= 0xc010000;
3838                                         else
3839                                                 val |= 0x4010000;
3840
3841                                         tw32_f(MAC_SERDES_CFG, val);
3842                                 }
3843
3844                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3845                                 udelay(40);
3846
3847                                 /* Link parallel detection - link is up
3848                                  * only if we have PCS_SYNC and are not
3849                                  * receiving config code words. */
3850                                 mac_status = tr32(MAC_STATUS);
3851                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3852                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3853                                         tg3_setup_flow_control(tp, 0, 0);
3854                                         current_link_up = 1;
3855                                         tp->tg3_flags2 |=
3856                                                 TG3_FLG2_PARALLEL_DETECT;
3857                                         tp->serdes_counter =
3858                                                 SERDES_PARALLEL_DET_TIMEOUT;
3859                                 } else
3860                                         goto restart_autoneg;
3861                         }
3862                 }
3863         } else {
3864                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3865                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3866         }
3867
3868 out:
3869         return current_link_up;
3870 }
3871
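/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000FD link when autonegotiation is disabled.  Returns
 * nonzero when the link is up.
 */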
3872 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3873 {
3874         int current_link_up = 0;
3875
3876         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3877                 goto out;
3878
3879         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3880                 u32 txflags, rxflags;
3881                 int i;
3882
3883                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3884                         u32 local_adv = 0, remote_adv = 0;
3885
3886                         if (txflags & ANEG_CFG_PS1)
3887                                 local_adv |= ADVERTISE_1000XPAUSE;
3888                         if (txflags & ANEG_CFG_PS2)
3889                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3890
3891                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3892                                 remote_adv |= LPA_1000XPAUSE;
3893                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3894                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3895
3896                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3897
3898                         current_link_up = 1;
3899                 }
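                /* Ack the SYNC/CFG changed bits until they stop
                 * reasserting, i.e. until the link has settled.
                 */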
3900                 for (i = 0; i < 30; i++) {
3901                         udelay(20);
3902                         tw32_f(MAC_STATUS,
3903                                (MAC_STATUS_SYNC_CHANGED |
3904                                 MAC_STATUS_CFG_CHANGED));
3905                         udelay(40);
3906                         if ((tr32(MAC_STATUS) &
3907                              (MAC_STATUS_SYNC_CHANGED |
3908                               MAC_STATUS_CFG_CHANGED)) == 0)
3909                                 break;
3910                 }
3911
3912                 mac_status = tr32(MAC_STATUS);
3913                 if (current_link_up == 0 &&
3914                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3915                     !(mac_status & MAC_STATUS_RCVD_CFG))
3916                         current_link_up = 1;
3917         } else {
3918                 tg3_setup_flow_control(tp, 0, 0);
3919
3920                 /* Forcing 1000FD link up. */
3921                 current_link_up = 1;
3922
3923                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3924                 udelay(40);
3925
3926                 tw32_f(MAC_MODE, tp->mac_mode);
3927                 udelay(40);
3928         }
3929
3930 out:
3931         return current_link_up;
3932 }
3933
3934 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3935 {
3936         u32 orig_pause_cfg;
3937         u16 orig_active_speed;
3938         u8 orig_active_duplex;
3939         u32 mac_status;
3940         int current_link_up;
3941         int i;
3942
3943         orig_pause_cfg = tp->link_config.active_flowctrl;
3944         orig_active_speed = tp->link_config.active_speed;
3945         orig_active_duplex = tp->link_config.active_duplex;
3946
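        /* Fast path for software-autoneg setups: if the link is
         * already up with a clean status, just ack the change bits
         * and return.
         */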
3947         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3948             netif_carrier_ok(tp->dev) &&
3949             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3950                 mac_status = tr32(MAC_STATUS);
3951                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3952                                MAC_STATUS_SIGNAL_DET |
3953                                MAC_STATUS_CFG_CHANGED |
3954                                MAC_STATUS_RCVD_CFG);
3955                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3956                                    MAC_STATUS_SIGNAL_DET)) {
3957                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3958                                             MAC_STATUS_CFG_CHANGED));
3959                         return 0;
3960                 }
3961         }
3962
3963         tw32_f(MAC_TX_AUTO_NEG, 0);
3964
3965         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3966         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3967         tw32_f(MAC_MODE, tp->mac_mode);
3968         udelay(40);
3969
3970         if (tp->phy_id == PHY_ID_BCM8002)
3971                 tg3_init_bcm8002(tp);
3972
3973         /* Enable link change events even when polling the serdes. */
3974         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3975         udelay(40);
3976
3977         current_link_up = 0;
3978         mac_status = tr32(MAC_STATUS);
3979
3980         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3981                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3982         else
3983                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3984
3985         tp->napi[0].hw_status->status =
3986                 (SD_STATUS_UPDATED |
3987                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3988
3989         for (i = 0; i < 100; i++) {
3990                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3991                                     MAC_STATUS_CFG_CHANGED));
3992                 udelay(5);
3993                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3994                                          MAC_STATUS_CFG_CHANGED |
3995                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3996                         break;
3997         }
3998
3999         mac_status = tr32(MAC_STATUS);
4000         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4001                 current_link_up = 0;
4002                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4003                     tp->serdes_counter == 0) {
4004                         tw32_f(MAC_MODE, (tp->mac_mode |
4005                                           MAC_MODE_SEND_CONFIGS));
4006                         udelay(1);
4007                         tw32_f(MAC_MODE, tp->mac_mode);
4008                 }
4009         }
4010
4011         if (current_link_up == 1) {
4012                 tp->link_config.active_speed = SPEED_1000;
4013                 tp->link_config.active_duplex = DUPLEX_FULL;
4014                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4015                                     LED_CTRL_LNKLED_OVERRIDE |
4016                                     LED_CTRL_1000MBPS_ON));
4017         } else {
4018                 tp->link_config.active_speed = SPEED_INVALID;
4019                 tp->link_config.active_duplex = DUPLEX_INVALID;
4020                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4021                                     LED_CTRL_LNKLED_OVERRIDE |
4022                                     LED_CTRL_TRAFFIC_OVERRIDE));
4023         }
4024
4025         if (current_link_up != netif_carrier_ok(tp->dev)) {
4026                 if (current_link_up)
4027                         netif_carrier_on(tp->dev);
4028                 else
4029                         netif_carrier_off(tp->dev);
4030                 tg3_link_report(tp);
4031         } else {
4032                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4033                 if (orig_pause_cfg != now_pause_cfg ||
4034                     orig_active_speed != tp->link_config.active_speed ||
4035                     orig_active_duplex != tp->link_config.active_duplex)
4036                         tg3_link_report(tp);
4037         }
4038
4039         return 0;
4040 }
4041
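/* Link setup for serdes devices managed through MII-style registers
 * (e.g. 5714S; note SERDES_AN_TIMEOUT_5714S below).  Returns the
 * accumulated PHY access error status.
 */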
4042 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4043 {
4044         int current_link_up, err = 0;
4045         u32 bmsr, bmcr;
4046         u16 current_speed;
4047         u8 current_duplex;
4048         u32 local_adv, remote_adv;
4049
4050         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4051         tw32_f(MAC_MODE, tp->mac_mode);
4052         udelay(40);
4053
4054         tw32(MAC_EVENT, 0);
4055
4056         tw32_f(MAC_STATUS,
4057              (MAC_STATUS_SYNC_CHANGED |
4058               MAC_STATUS_CFG_CHANGED |
4059               MAC_STATUS_MI_COMPLETION |
4060               MAC_STATUS_LNKSTATE_CHANGED));
4061         udelay(40);
4062
4063         if (force_reset)
4064                 tg3_phy_reset(tp);
4065
4066         current_link_up = 0;
4067         current_speed = SPEED_INVALID;
4068         current_duplex = DUPLEX_INVALID;
4069
4070         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4071         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
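        /* On 5714, take the link-up indication from the MAC's TX
         * status rather than from BMSR_LSTATUS.
         */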
4072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4073                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4074                         bmsr |= BMSR_LSTATUS;
4075                 else
4076                         bmsr &= ~BMSR_LSTATUS;
4077         }
4078
4079         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4080
4081         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4082             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4083                 /* do nothing, just check for link up at the end */
4084         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4085                 u32 adv, new_adv;
4086
4087                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4088                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4089                                   ADVERTISE_1000XPAUSE |
4090                                   ADVERTISE_1000XPSE_ASYM |
4091                                   ADVERTISE_SLCT);
4092
4093                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4094
4095                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4096                         new_adv |= ADVERTISE_1000XHALF;
4097                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4098                         new_adv |= ADVERTISE_1000XFULL;
4099
4100                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4101                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4102                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4103                         tg3_writephy(tp, MII_BMCR, bmcr);
4104
4105                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4106                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4107                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4108
4109                         return err;
4110                 }
4111         } else {
4112                 u32 new_bmcr;
4113
4114                 bmcr &= ~BMCR_SPEED1000;
4115                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4116
4117                 if (tp->link_config.duplex == DUPLEX_FULL)
4118                         new_bmcr |= BMCR_FULLDPLX;
4119
4120                 if (new_bmcr != bmcr) {
4121                         /* BMCR_SPEED1000 is a reserved bit that needs
4122                          * to be set on write.
4123                          */
4124                         new_bmcr |= BMCR_SPEED1000;
4125
4126                         /* Force a linkdown */
4127                         if (netif_carrier_ok(tp->dev)) {
4128                                 u32 adv;
4129
4130                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4131                                 adv &= ~(ADVERTISE_1000XFULL |
4132                                          ADVERTISE_1000XHALF |
4133                                          ADVERTISE_SLCT);
4134                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4135                                 tg3_writephy(tp, MII_BMCR, bmcr |
4136                                                            BMCR_ANRESTART |
4137                                                            BMCR_ANENABLE);
4138                                 udelay(10);
4139                                 netif_carrier_off(tp->dev);
4140                         }
4141                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4142                         bmcr = new_bmcr;
4143                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4144                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4145                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4146                             ASIC_REV_5714) {
4147                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4148                                         bmsr |= BMSR_LSTATUS;
4149                                 else
4150                                         bmsr &= ~BMSR_LSTATUS;
4151                         }
4152                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4153                 }
4154         }
4155
4156         if (bmsr & BMSR_LSTATUS) {
4157                 current_speed = SPEED_1000;
4158                 current_link_up = 1;
4159                 if (bmcr & BMCR_FULLDPLX)
4160                         current_duplex = DUPLEX_FULL;
4161                 else
4162                         current_duplex = DUPLEX_HALF;
4163
4164                 local_adv = 0;
4165                 remote_adv = 0;
4166
4167                 if (bmcr & BMCR_ANENABLE) {
4168                         u32 common;
4169
4170                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4171                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4172                         common = local_adv & remote_adv;
4173                         if (common & (ADVERTISE_1000XHALF |
4174                                       ADVERTISE_1000XFULL)) {
4175                                 if (common & ADVERTISE_1000XFULL)
4176                                         current_duplex = DUPLEX_FULL;
4177                                 else
4178                                         current_duplex = DUPLEX_HALF;
4179                         } else
4180                                 current_link_up = 0;
4182                 }
4183         }
4184
4185         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4186                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4187
4188         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4189         if (tp->link_config.active_duplex == DUPLEX_HALF)
4190                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4191
4192         tw32_f(MAC_MODE, tp->mac_mode);
4193         udelay(40);
4194
4195         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4196
4197         tp->link_config.active_speed = current_speed;
4198         tp->link_config.active_duplex = current_duplex;
4199
4200         if (current_link_up != netif_carrier_ok(tp->dev)) {
4201                 if (current_link_up)
4202                         netif_carrier_on(tp->dev);
4203                 else {
4204                         netif_carrier_off(tp->dev);
4205                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4206                 }
4207                 tg3_link_report(tp);
4208         }
4209         return err;
4210 }
4211
4212 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4213 {
4214         if (tp->serdes_counter) {
4215                 /* Give autoneg time to complete. */
4216                 tp->serdes_counter--;
4217                 return;
4218         }
4219         if (!netif_carrier_ok(tp->dev) &&
4220             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4221                 u32 bmcr;
4222
4223                 tg3_readphy(tp, MII_BMCR, &bmcr);
4224                 if (bmcr & BMCR_ANENABLE) {
4225                         u32 phy1, phy2;
4226
4227                         /* Select shadow register 0x1f */
4228                         tg3_writephy(tp, 0x1c, 0x7c00);
4229                         tg3_readphy(tp, 0x1c, &phy1);
4230
4231                         /* Select expansion interrupt status register */
4232                         tg3_writephy(tp, 0x17, 0x0f01);
4233                         tg3_readphy(tp, 0x15, &phy2);
4234                         tg3_readphy(tp, 0x15, &phy2);
4235
4236                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4237                                 /* We have signal detect and not receiving
4238                                  * config code words, link is up by parallel
4239                                  * detection.
4240                                  */
4241
4242                                 bmcr &= ~BMCR_ANENABLE;
4243                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4244                                 tg3_writephy(tp, MII_BMCR, bmcr);
4245                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4246                         }
4247                 }
4248         } else if (netif_carrier_ok(tp->dev) &&
4249                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4250                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4252                 u32 phy2;
4253
4254                 /* Select expansion interrupt status register */
4255                 tg3_writephy(tp, 0x17, 0x0f01);
4256                 tg3_readphy(tp, 0x15, &phy2);
4257                 if (phy2 & 0x20) {
4258                         u32 bmcr;
4259
4260                         /* Config code words received, turn on autoneg. */
4261                         tg3_readphy(tp, MII_BMCR, &bmcr);
4262                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4263
4264                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4265
4266                 }
4267         }
4268 }
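/* Illustrative summary of the register probing above (bit meanings
 * inferred from the code, not from a datasheet): shadow register 0x1f
 * bit 4 (phy1 & 0x10) indicates signal detect, and the expansion
 * interrupt status register bit 5 (phy2 & 0x20) indicates that config
 * code words are being received.  Signal with no code words means the
 * link partner is not autonegotiating, so the link is forced up at
 * 1000/Full by parallel detection.
 */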
4269
4270 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4271 {
4272         int err;
4273
4274         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4275                 err = tg3_setup_fiber_phy(tp, force_reset);
4276         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4277                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4278         } else {
4279                 err = tg3_setup_copper_phy(tp, force_reset);
4280         }
4281
4282         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4283                 u32 val, scale;
4284
4285                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4286                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4287                         scale = 65;
4288                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4289                         scale = 6;
4290                 else
4291                         scale = 12;
4292
4293                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4294                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4295                 tw32(GRC_MISC_CFG, val);
4296         }
4297
4298         if (tp->link_config.active_speed == SPEED_1000 &&
4299             tp->link_config.active_duplex == DUPLEX_HALF)
4300                 tw32(MAC_TX_LENGTHS,
4301                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4302                       (6 << TX_LENGTHS_IPG_SHIFT) |
4303                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4304         else
4305                 tw32(MAC_TX_LENGTHS,
4306                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4307                       (6 << TX_LENGTHS_IPG_SHIFT) |
4308                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4309
4310         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4311                 if (netif_carrier_ok(tp->dev)) {
4312                         tw32(HOSTCC_STAT_COAL_TICKS,
4313                              tp->coal.stats_block_coalesce_usecs);
4314                 } else {
4315                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4316                 }
4317         }
4318
4319         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4320                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4321                 if (!netif_carrier_ok(tp->dev))
4322                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4323                               tp->pwrmgmt_thresh;
4324                 else
4325                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4326                 tw32(PCIE_PWR_MGMT_THRESH, val);
4327         }
4328
4329         return err;
4330 }
4331
4332 /* This is called whenever we suspect that the system chipset is re-
4333  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4334  * is bogus tx completions. We try to recover by setting the
4335  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4336  * in the workqueue.
4337  */
4338 static void tg3_tx_recover(struct tg3 *tp)
4339 {
4340         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4341                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4342
4343         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4344                "mapped I/O cycles to the network device, attempting to "
4345                "recover. Please report the problem to the driver maintainer "
4346                "and include system chipset information.\n", tp->dev->name);
4347
4348         spin_lock(&tp->lock);
4349         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4350         spin_unlock(&tp->lock);
4351 }
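/* A minimal sketch (illustrative, not used by the driver) of the usual
 * remedy for posted-write reordering: read the register back right
 * after writing it, which forces the write to complete before any
 * later MMIO.  The driver's flush variants follow this pattern once
 * TG3_FLAG_MBOX_WRITE_REORDER is set; see tg3_reset_task().
 */
static inline void example_mbox_write_flush(void __iomem *mbox, u32 val)
{
        writel(val, mbox);
        readl(mbox);            /* flush the posted write */
}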
4352
4353 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4354 {
4355         smp_mb();
4356         return tnapi->tx_pending -
4357                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4358 }
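/* Worked example (illustrative): with TG3_TX_RING_SIZE == 512 and
 * tx_pending == 511, a wrapped tx_prod == 5 and tx_cons == 510 give
 * (5 - 510) & 511 == 7 descriptors in flight, so 504 are available.
 * The mask only works because the ring size is a power of two.
 */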
4359
4360 /* Tigon3 never reports partial packet sends.  So we do not
4361  * need special logic to handle SKBs that have not had all
4362  * of their frags sent yet, like SunGEM does.
4363  */
4364 static void tg3_tx(struct tg3_napi *tnapi)
4365 {
4366         struct tg3 *tp = tnapi->tp;
4367         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4368         u32 sw_idx = tnapi->tx_cons;
4369         struct netdev_queue *txq;
4370         int index = tnapi - tp->napi;
4371
4372         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4373                 index--;
4374
4375         txq = netdev_get_tx_queue(tp->dev, index);
4376
4377         while (sw_idx != hw_idx) {
4378                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4379                 struct sk_buff *skb = ri->skb;
4380                 int i, tx_bug = 0;
4381
4382                 if (unlikely(skb == NULL)) {
4383                         tg3_tx_recover(tp);
4384                         return;
4385                 }
4386
4387                 pci_unmap_single(tp->pdev,
4388                                  pci_unmap_addr(ri, mapping),
4389                                  skb_headlen(skb),
4390                                  PCI_DMA_TODEVICE);
4391
4392                 ri->skb = NULL;
4393
4394                 sw_idx = NEXT_TX(sw_idx);
4395
4396                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4397                         ri = &tnapi->tx_buffers[sw_idx];
4398                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4399                                 tx_bug = 1;
4400
4401                         pci_unmap_page(tp->pdev,
4402                                        pci_unmap_addr(ri, mapping),
4403                                        skb_shinfo(skb)->frags[i].size,
4404                                        PCI_DMA_TODEVICE);
4405                         sw_idx = NEXT_TX(sw_idx);
4406                 }
4407
4408                 dev_kfree_skb(skb);
4409
4410                 if (unlikely(tx_bug)) {
4411                         tg3_tx_recover(tp);
4412                         return;
4413                 }
4414         }
4415
4416         tnapi->tx_cons = sw_idx;
4417
4418         /* Need to make the tx_cons update visible to tg3_start_xmit()
4419          * before checking for netif_queue_stopped().  Without the
4420          * memory barrier, there is a small possibility that tg3_start_xmit()
4421          * will miss it and cause the queue to be stopped forever.
4422          */
4423         smp_mb();
4424
4425         if (unlikely(netif_tx_queue_stopped(txq) &&
4426                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4427                 __netif_tx_lock(txq, smp_processor_id());
4428                 if (netif_tx_queue_stopped(txq) &&
4429                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4430                         netif_tx_wake_queue(txq);
4431                 __netif_tx_unlock(txq);
4432         }
4433 }
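/* Note: the smp_mb() above pairs with the smp_mb() in tg3_tx_avail(),
 * which the producer path calls after stopping the queue.  Without the
 * pair, the consumer could miss the stopped state while the producer
 * misses the tx_cons update, leaving the queue stopped forever.
 */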
4434
4435 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4436 {
4437         if (!ri->skb)
4438                 return;
4439
4440         pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4441                          map_sz, PCI_DMA_FROMDEVICE);
4442         dev_kfree_skb_any(ri->skb);
4443         ri->skb = NULL;
4444 }
4445
4446 /* Returns size of skb allocated or < 0 on error.
4447  *
4448  * We only need to fill in the address because the other members
4449  * of the RX descriptor are invariant, see tg3_init_rings.
4450  *
4451  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4452  * posting buffers we only dirty the first cache line of the RX
4453  * descriptor (containing the address).  Whereas for the RX status
4454  * buffers the cpu only reads the last cacheline of the RX descriptor
4455  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4456  */
4457 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4458                             u32 opaque_key, u32 dest_idx_unmasked)
4459 {
4460         struct tg3_rx_buffer_desc *desc;
4461         struct ring_info *map, *src_map;
4462         struct sk_buff *skb;
4463         dma_addr_t mapping;
4464         int skb_size, dest_idx;
4465
4466         src_map = NULL;
4467         switch (opaque_key) {
4468         case RXD_OPAQUE_RING_STD:
4469                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4470                 desc = &tpr->rx_std[dest_idx];
4471                 map = &tpr->rx_std_buffers[dest_idx];
4472                 skb_size = tp->rx_pkt_map_sz;
4473                 break;
4474
4475         case RXD_OPAQUE_RING_JUMBO:
4476                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4477                 desc = &tpr->rx_jmb[dest_idx].std;
4478                 map = &tpr->rx_jmb_buffers[dest_idx];
4479                 skb_size = TG3_RX_JMB_MAP_SZ;
4480                 break;
4481
4482         default:
4483                 return -EINVAL;
4484         }
4485
4486         /* Do not overwrite any of the map or rp information
4487          * until we are sure we can commit to a new buffer.
4488          *
4489          * Callers depend upon this behavior and assume that
4490          * we leave everything unchanged if we fail.
4491          */
4492         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4493         if (skb == NULL)
4494                 return -ENOMEM;
4495
4496         skb_reserve(skb, tp->rx_offset);
4497
4498         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4499                                  PCI_DMA_FROMDEVICE);
4500         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4501                 dev_kfree_skb(skb);
4502                 return -EIO;
4503         }
4504
4505         map->skb = skb;
4506         pci_unmap_addr_set(map, mapping, mapping);
4507
4508         desc->addr_hi = ((u64)mapping >> 32);
4509         desc->addr_lo = ((u64)mapping & 0xffffffff);
4510
4511         return skb_size;
4512 }
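/* Usage sketch (illustrative, not called by the driver): posting fresh
 * buffers is just a walk of the producer index.  The real refill paths,
 * tg3_rx() and the ring setup code, do the same with index masking and
 * error accounting.
 */
static inline void example_refill_std_ring(struct tg3 *tp,
                                           struct tg3_rx_prodring_set *tpr)
{
        u32 idx;

        for (idx = 0; idx < tp->rx_pending; idx++)
                if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, idx) < 0)
                        break;  /* out of memory; post what we have */
}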
4513
4514 /* We only need to copy the address over because the other
4515  * members of the RX descriptor are invariant.  See notes above
4516  * tg3_alloc_rx_skb for full details.
4517  */
4518 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4519                            struct tg3_rx_prodring_set *dpr,
4520                            u32 opaque_key, int src_idx,
4521                            u32 dest_idx_unmasked)
4522 {
4523         struct tg3 *tp = tnapi->tp;
4524         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4525         struct ring_info *src_map, *dest_map;
4526         int dest_idx;
4527         struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4528
4529         switch (opaque_key) {
4530         case RXD_OPAQUE_RING_STD:
4531                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4532                 dest_desc = &dpr->rx_std[dest_idx];
4533                 dest_map = &dpr->rx_std_buffers[dest_idx];
4534                 src_desc = &spr->rx_std[src_idx];
4535                 src_map = &spr->rx_std_buffers[src_idx];
4536                 break;
4537
4538         case RXD_OPAQUE_RING_JUMBO:
4539                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4540                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4541                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4542                 src_desc = &spr->rx_jmb[src_idx].std;
4543                 src_map = &spr->rx_jmb_buffers[src_idx];
4544                 break;
4545
4546         default:
4547                 return;
4548         }
4549
4550         dest_map->skb = src_map->skb;
4551         pci_unmap_addr_set(dest_map, mapping,
4552                            pci_unmap_addr(src_map, mapping));
4553         dest_desc->addr_hi = src_desc->addr_hi;
4554         dest_desc->addr_lo = src_desc->addr_lo;
4555
4556         /* Ensure that the update to the skb happens after the physical
4557          * addresses have been transferred to the new BD location.
4558          */
4559         smp_wmb();
4560
4561         src_map->skb = NULL;
4562 }
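/* Note: the smp_wmb() above pairs with the smp_rmb() calls in
 * tg3_rx_prodring_xfer(), whose comments reference this ordering; the
 * reader must observe the descriptor address transfer before it sees
 * the source skb pointer being cleared.
 */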
4563
4564 /* The RX ring scheme is composed of multiple rings which post fresh
4565  * buffers to the chip, and one special ring the chip uses to report
4566  * status back to the host.
4567  *
4568  * The special ring reports the status of received packets to the
4569  * host.  The chip does not write into the original descriptor the
4570  * RX buffer was obtained from.  The chip simply takes the original
4571  * descriptor as provided by the host, updates the status and length
4572  * field, then writes this into the next status ring entry.
4573  *
4574  * Each ring the host uses to post buffers to the chip is described
4575  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4576  * it is first placed into the on-chip RAM.  Once the packet's length
4577  * is known, the chip walks down the TG3_BDINFO entries to select the
4578  * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
4579  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4580  *
4581  * The "separate ring for rx status" scheme may sound queer, but it makes
4582  * sense from a cache coherency perspective.  If only the host writes
4583  * to the buffer post rings, and only the chip writes to the rx status
4584  * rings, then cache lines never move beyond shared-modified state.
4585  * If both the host and chip were to write into the same ring, cache line
4586  * eviction could occur since both entities want it in an exclusive state.
4587  */
4588 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4589 {
4590         struct tg3 *tp = tnapi->tp;
4591         u32 work_mask, rx_std_posted = 0;
4592         u32 std_prod_idx, jmb_prod_idx;
4593         u32 sw_idx = tnapi->rx_rcb_ptr;
4594         u16 hw_idx;
4595         int received;
4596         struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4597
4598         hw_idx = *(tnapi->rx_rcb_prod_idx);
4599         /*
4600          * We need to order the read of hw_idx and the read of
4601          * the opaque cookie.
4602          */
4603         rmb();
4604         work_mask = 0;
4605         received = 0;
4606         std_prod_idx = tpr->rx_std_prod_idx;
4607         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4608         while (sw_idx != hw_idx && budget > 0) {
4609                 struct ring_info *ri;
4610                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4611                 unsigned int len;
4612                 struct sk_buff *skb;
4613                 dma_addr_t dma_addr;
4614                 u32 opaque_key, desc_idx, *post_ptr;
4615
4616                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4617                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4618                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4619                         ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4620                         dma_addr = pci_unmap_addr(ri, mapping);
4621                         skb = ri->skb;
4622                         post_ptr = &std_prod_idx;
4623                         rx_std_posted++;
4624                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4625                         ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4626                         dma_addr = pci_unmap_addr(ri, mapping);
4627                         skb = ri->skb;
4628                         post_ptr = &jmb_prod_idx;
4629                 } else
4630                         goto next_pkt_nopost;
4631
4632                 work_mask |= opaque_key;
4633
4634                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4635                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4636                 drop_it:
4637                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4638                                        desc_idx, *post_ptr);
4639                 drop_it_no_recycle:
4640                         /* Other statistics are kept track of by the card. */
4641                         tp->net_stats.rx_dropped++;
4642                         goto next_pkt;
4643                 }
4644
4645                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4646                       ETH_FCS_LEN;
4647
4648                 if (len > RX_COPY_THRESHOLD &&
4649                     tp->rx_offset == NET_IP_ALIGN) {
4650                     /* rx_offset will likely not equal NET_IP_ALIGN
4651                      * if this is a 5701 card running in PCI-X mode
4652                      * [see tg3_get_invariants()]
4653                      */
4654                         int skb_size;
4655
4656                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4657                                                     *post_ptr);
4658                         if (skb_size < 0)
4659                                 goto drop_it;
4660
4661                         ri->skb = NULL;
4662
4663                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4664                                          PCI_DMA_FROMDEVICE);
4665
4666                         skb_put(skb, len);
4667                 } else {
4668                         struct sk_buff *copy_skb;
4669
4670                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4671                                        desc_idx, *post_ptr);
4672
4673                         copy_skb = netdev_alloc_skb(tp->dev,
4674                                                     len + TG3_RAW_IP_ALIGN);
4675                         if (copy_skb == NULL)
4676                                 goto drop_it_no_recycle;
4677
4678                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4679                         skb_put(copy_skb, len);
4680                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4681                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4682                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4683
4684                         /* We'll reuse the original ring buffer. */
4685                         skb = copy_skb;
4686                 }
4687
4688                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4689                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4690                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4691                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4692                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4693                 else
4694                         skb->ip_summed = CHECKSUM_NONE;
4695
4696                 skb->protocol = eth_type_trans(skb, tp->dev);
4697
4698                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4699                     skb->protocol != htons(ETH_P_8021Q)) {
4700                         dev_kfree_skb(skb);
4701                         goto next_pkt;
4702                 }
4703
4704 #if TG3_VLAN_TAG_USED
4705                 if (tp->vlgrp != NULL &&
4706                     desc->type_flags & RXD_FLAG_VLAN) {
4707                         vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4708                                          desc->err_vlan & RXD_VLAN_MASK, skb);
4709                 } else
4710 #endif
4711                         napi_gro_receive(&tnapi->napi, skb);
4712
4713                 received++;
4714                 budget--;
4715
4716 next_pkt:
4717                 (*post_ptr)++;
4718
4719                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4720                         tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4721                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4722                                      tpr->rx_std_prod_idx);
4723                         work_mask &= ~RXD_OPAQUE_RING_STD;
4724                         rx_std_posted = 0;
4725                 }
4726 next_pkt_nopost:
4727                 sw_idx++;
4728                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4729
4730                 /* Refresh hw_idx to see if there is new work */
4731                 if (sw_idx == hw_idx) {
4732                         hw_idx = *(tnapi->rx_rcb_prod_idx);
4733                         rmb();
4734                 }
4735         }
4736
4737         /* ACK the status ring. */
4738         tnapi->rx_rcb_ptr = sw_idx;
4739         tw32_rx_mbox(tnapi->consmbox, sw_idx);
4740
4741         /* Refill RX ring(s). */
4742         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4743                 if (work_mask & RXD_OPAQUE_RING_STD) {
4744                         tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4745                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4746                                      tpr->rx_std_prod_idx);
4747                 }
4748                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4749                         tpr->rx_jmb_prod_idx = jmb_prod_idx %
4750                                                TG3_RX_JUMBO_RING_SIZE;
4751                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4752                                      tpr->rx_jmb_prod_idx);
4753                 }
4754                 mmiowb();
4755         } else if (work_mask) {
4756                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4757                  * updated before the producer indices can be updated.
4758                  */
4759                 smp_wmb();
4760
4761                 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4762                 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4763
4764                 if (tnapi != &tp->napi[1])
4765                         napi_schedule(&tp->napi[1].napi);
4766         }
4767
4768         return received;
4769 }
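/* Note (illustrative): the status ring is consumed with a power-of-two
 * mask, so e.g. with TG3_RX_RCB_RING_SIZE(tp) == 1024 an sw_idx of 1023
 * wraps to 0 after "sw_idx++; sw_idx &= 1023;".
 */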
4770
4771 static void tg3_poll_link(struct tg3 *tp)
4772 {
4773         /* handle link change and other phy events */
4774         if (!(tp->tg3_flags &
4775               (TG3_FLAG_USE_LINKCHG_REG |
4776                TG3_FLAG_POLL_SERDES))) {
4777                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4778
4779                 if (sblk->status & SD_STATUS_LINK_CHG) {
4780                         sblk->status = SD_STATUS_UPDATED |
4781                                        (sblk->status & ~SD_STATUS_LINK_CHG);
4782                         spin_lock(&tp->lock);
4783                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4784                                 tw32_f(MAC_STATUS,
4785                                      (MAC_STATUS_SYNC_CHANGED |
4786                                       MAC_STATUS_CFG_CHANGED |
4787                                       MAC_STATUS_MI_COMPLETION |
4788                                       MAC_STATUS_LNKSTATE_CHANGED));
4789                                 udelay(40);
4790                         } else
4791                                 tg3_setup_phy(tp, 0);
4792                         spin_unlock(&tp->lock);
4793                 }
4794         }
4795 }
4796
4797 static void tg3_rx_prodring_xfer(struct tg3 *tp,
4798                                  struct tg3_rx_prodring_set *dpr,
4799                                  struct tg3_rx_prodring_set *spr)
4800 {
4801         u32 si, di, cpycnt, src_prod_idx;
4802         int i;
4803
4804         while (1) {
4805                 src_prod_idx = spr->rx_std_prod_idx;
4806
4807                 /* Make sure updates to the rx_std_buffers[] entries and the
4808                  * standard producer index are seen in the correct order.
4809                  */
4810                 smp_rmb();
4811
4812                 if (spr->rx_std_cons_idx == src_prod_idx)
4813                         break;
4814
4815                 if (spr->rx_std_cons_idx < src_prod_idx)
4816                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4817                 else
4818                         cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4819
4820                 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4821
4822                 si = spr->rx_std_cons_idx;
4823                 di = dpr->rx_std_prod_idx;
4824
4825                 for (i = di; i < di + cpycnt; i++) {
4826                         if (dpr->rx_std_buffers[i].skb) {
4827                                 cpycnt = i - di;
4828                                 break;
4829                         }
4830                 }
4831
4832                 if (!cpycnt)
4833                         break;
4834
4835                 /* Ensure that updates to the rx_std_buffers ring and the
4836                  * shadowed hardware producer ring from tg3_recycle_skb() are
4837                  * ordered correctly WRT the skb check above.
4838                  */
4839                 smp_rmb();
4840
4841                 memcpy(&dpr->rx_std_buffers[di],
4842                        &spr->rx_std_buffers[si],
4843                        cpycnt * sizeof(struct ring_info));
4844
4845                 for (i = 0; i < cpycnt; i++, di++, si++) {
4846                         struct tg3_rx_buffer_desc *sbd, *dbd;
4847                         sbd = &spr->rx_std[si];
4848                         dbd = &dpr->rx_std[di];
4849                         dbd->addr_hi = sbd->addr_hi;
4850                         dbd->addr_lo = sbd->addr_lo;
4851                 }
4852
4853                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4854                                        TG3_RX_RING_SIZE;
4855                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4856                                        TG3_RX_RING_SIZE;
4857         }
4858
4859         while (1) {
4860                 src_prod_idx = spr->rx_jmb_prod_idx;
4861
4862                 /* Make sure updates to the rx_jmb_buffers[] entries and
4863                  * the jumbo producer index are seen in the correct order.
4864                  */
4865                 smp_rmb();
4866
4867                 if (spr->rx_jmb_cons_idx == src_prod_idx)
4868                         break;
4869
4870                 if (spr->rx_jmb_cons_idx < src_prod_idx)
4871                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4872                 else
4873                         cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4874
4875                 cpycnt = min(cpycnt,
4876                              TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4877
4878                 si = spr->rx_jmb_cons_idx;
4879                 di = dpr->rx_jmb_prod_idx;
4880
4881                 for (i = di; i < di + cpycnt; i++) {
4882                         if (dpr->rx_jmb_buffers[i].skb) {
4883                                 cpycnt = i - di;
4884                                 break;
4885                         }
4886                 }
4887
4888                 if (!cpycnt)
4889                         break;
4890
4891                 /* Ensure that updates to the rx_jmb_buffers ring and the
4892                  * shadowed hardware producer ring from tg3_recycle_skb() are
4893                  * ordered correctly WRT the skb check above.
4894                  */
4895                 smp_rmb();
4896
4897                 memcpy(&dpr->rx_jmb_buffers[di],
4898                        &spr->rx_jmb_buffers[si],
4899                        cpycnt * sizeof(struct ring_info));
4900
4901                 for (i = 0; i < cpycnt; i++, di++, si++) {
4902                         struct tg3_rx_buffer_desc *sbd, *dbd;
4903                         sbd = &spr->rx_jmb[si].std;
4904                         dbd = &dpr->rx_jmb[di].std;
4905                         dbd->addr_hi = sbd->addr_hi;
4906                         dbd->addr_lo = sbd->addr_lo;
4907                 }
4908
4909                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4910                                        TG3_RX_JUMBO_RING_SIZE;
4911                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4912                                        TG3_RX_JUMBO_RING_SIZE;
4913         }
4914 }
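/* Worked example (illustrative): with TG3_RX_RING_SIZE == 512, a source
 * consumer index of 500 and a source producer index of 10, the first
 * pass copies 512 - 500 == 12 entries (500..511), the indices wrap, and
 * the next pass copies the remaining 10.  The min() against the
 * destination's free space and the occupied-slot scan can shrink each
 * pass further.
 */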
4915
4916 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4917 {
4918         struct tg3 *tp = tnapi->tp;
4919
4920         /* run TX completion thread */
4921         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4922                 tg3_tx(tnapi);
4923                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4924                         return work_done;
4925         }
4926
4927         /* run RX thread, within the bounds set by NAPI.
4928          * All RX "locking" is done by ensuring outside
4929          * code synchronizes with tg3->napi.poll()
4930          */
4931         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4932                 work_done += tg3_rx(tnapi, budget - work_done);
4933
4934         if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4935                 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4936                 int i;
4937                 u32 std_prod_idx = dpr->rx_std_prod_idx;
4938                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4939
4940                 for (i = 1; i < tp->irq_cnt; i++)
4941                         tg3_rx_prodring_xfer(tp, dpr, tp->napi[i].prodring);
4942
4943                 wmb();
4944
4945                 if (std_prod_idx != dpr->rx_std_prod_idx)
4946                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4947                                      dpr->rx_std_prod_idx);
4948
4949                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4950                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4951                                      dpr->rx_jmb_prod_idx);
4952
4953                 mmiowb();
4954         }
4955
4956         return work_done;
4957 }
4958
4959 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4960 {
4961         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4962         struct tg3 *tp = tnapi->tp;
4963         int work_done = 0;
4964         struct tg3_hw_status *sblk = tnapi->hw_status;
4965
4966         while (1) {
4967                 work_done = tg3_poll_work(tnapi, work_done, budget);
4968
4969                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4970                         goto tx_recovery;
4971
4972                 if (unlikely(work_done >= budget))
4973                         break;
4974
4975                 /* tnapi->last_tag is used when re-enabling interrupts
4976                  * below to tell the hw how much work has been processed,
4977                  * so we must read it before checking for more work.
4978                  */
4979                 tnapi->last_tag = sblk->status_tag;
4980                 tnapi->last_irq_tag = tnapi->last_tag;
4981                 rmb();
4982
4983                 /* check for RX/TX work to do */
4984                 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4985                     *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4986                         napi_complete(napi);
4987                         /* Reenable interrupts. */
4988                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4989                         mmiowb();
4990                         break;
4991                 }
4992         }
4993
4994         return work_done;
4995
4996 tx_recovery:
4997         /* work_done is guaranteed to be less than budget. */
4998         napi_complete(napi);
4999         schedule_work(&tp->reset_task);
5000         return work_done;
5001 }
5002
5003 static int tg3_poll(struct napi_struct *napi, int budget)
5004 {
5005         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5006         struct tg3 *tp = tnapi->tp;
5007         int work_done = 0;
5008         struct tg3_hw_status *sblk = tnapi->hw_status;
5009
5010         while (1) {
5011                 tg3_poll_link(tp);
5012
5013                 work_done = tg3_poll_work(tnapi, work_done, budget);
5014
5015                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5016                         goto tx_recovery;
5017
5018                 if (unlikely(work_done >= budget))
5019                         break;
5020
5021                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5022                         /* tnapi->last_tag is used in tg3_int_reenable() below
5023                          * to tell the hw how much work has been processed,
5024                          * so we must read it before checking for more work.
5025                          */
5026                         tnapi->last_tag = sblk->status_tag;
5027                         tnapi->last_irq_tag = tnapi->last_tag;
5028                         rmb();
5029                 } else
5030                         sblk->status &= ~SD_STATUS_UPDATED;
5031
5032                 if (likely(!tg3_has_work(tnapi))) {
5033                         napi_complete(napi);
5034                         tg3_int_reenable(tnapi);
5035                         break;
5036                 }
5037         }
5038
5039         return work_done;
5040
5041 tx_recovery:
5042         /* work_done is guaranteed to be less than budget. */
5043         napi_complete(napi);
5044         schedule_work(&tp->reset_task);
5045         return work_done;
5046 }
5047
5048 static void tg3_irq_quiesce(struct tg3 *tp)
5049 {
5050         int i;
5051
5052         BUG_ON(tp->irq_sync);
5053
5054         tp->irq_sync = 1;
5055         smp_mb();
5056
5057         for (i = 0; i < tp->irq_cnt; i++)
5058                 synchronize_irq(tp->napi[i].irq_vec);
5059 }
5060
5061 static inline int tg3_irq_sync(struct tg3 *tp)
5062 {
5063         return tp->irq_sync;
5064 }
5065
5066 /* Fully shut down all tg3 driver activity elsewhere in the system.
5067  * If irq_sync is non-zero, the IRQ handlers are synchronized with as
5068  * well.  Most of the time this is only necessary when shutting down
5069  * the device.
5070  */
5071 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5072 {
5073         spin_lock_bh(&tp->lock);
5074         if (irq_sync)
5075                 tg3_irq_quiesce(tp);
5076 }
5077
5078 static inline void tg3_full_unlock(struct tg3 *tp)
5079 {
5080         spin_unlock_bh(&tp->lock);
5081 }
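/* Usage sketch (illustrative): configuration paths bracket hardware
 * changes with the full lock, passing irq_sync = 1 when the hardware
 * will be reset underneath the interrupt handlers, as tg3_reset_task()
 * does:
 *
 *      tg3_full_lock(tp, 1);   // also waits for in-flight IRQ handlers
 *      ... halt / reconfigure / re-init the hardware ...
 *      tg3_full_unlock(tp);
 */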
5082
5083 /* One-shot MSI handler - Chip automatically disables interrupt
5084  * after sending MSI so driver doesn't have to do it.
5085  */
5086 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5087 {
5088         struct tg3_napi *tnapi = dev_id;
5089         struct tg3 *tp = tnapi->tp;
5090
5091         prefetch(tnapi->hw_status);
5092         if (tnapi->rx_rcb)
5093                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5094
5095         if (likely(!tg3_irq_sync(tp)))
5096                 napi_schedule(&tnapi->napi);
5097
5098         return IRQ_HANDLED;
5099 }
5100
5101 /* MSI ISR - No need to check for interrupt sharing and no need to
5102  * flush status block and interrupt mailbox. PCI ordering rules
5103  * guarantee that MSI will arrive after the status block.
5104  */
5105 static irqreturn_t tg3_msi(int irq, void *dev_id)
5106 {
5107         struct tg3_napi *tnapi = dev_id;
5108         struct tg3 *tp = tnapi->tp;
5109
5110         prefetch(tnapi->hw_status);
5111         if (tnapi->rx_rcb)
5112                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5113         /*
5114          * Writing any value to intr-mbox-0 clears PCI INTA# and
5115          * chip-internal interrupt pending events.
5116          * Writing non-zero to intr-mbox-0 additionally tells the
5117          * NIC to stop sending us irqs, engaging "in-intr-handler"
5118          * event coalescing.
5119          */
5120         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5121         if (likely(!tg3_irq_sync(tp)))
5122                 napi_schedule(&tnapi->napi);
5123
5124         return IRQ_RETVAL(1);
5125 }
5126
5127 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5128 {
5129         struct tg3_napi *tnapi = dev_id;
5130         struct tg3 *tp = tnapi->tp;
5131         struct tg3_hw_status *sblk = tnapi->hw_status;
5132         unsigned int handled = 1;
5133
5134         /* In INTx mode, it is possible for the interrupt to arrive at
5135          * the CPU before the status block, which was posted prior to the
5136          * interrupt, is visible.  Reading the PCI State register will
5137          * confirm whether the interrupt is ours and will flush the status
5138          * block.
5138          */
5139         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5140                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5141                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5142                         handled = 0;
5143                         goto out;
5144                 }
5145         }
5146
5147         /*
5148          * Writing any value to intr-mbox-0 clears PCI INTA# and
5149          * chip-internal interrupt pending events.
5150          * Writing non-zero to intr-mbox-0 additionally tells the
5151          * NIC to stop sending us irqs, engaging "in-intr-handler"
5152          * event coalescing.
5153          *
5154          * Flush the mailbox to de-assert the IRQ immediately to prevent
5155          * spurious interrupts.  The flush impacts performance but
5156          * excessive spurious interrupts can be worse in some cases.
5157          */
5158         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5159         if (tg3_irq_sync(tp))
5160                 goto out;
5161         sblk->status &= ~SD_STATUS_UPDATED;
5162         if (likely(tg3_has_work(tnapi))) {
5163                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5164                 napi_schedule(&tnapi->napi);
5165         } else {
5166                 /* No work, shared interrupt perhaps?  Re-enable
5167                  * interrupts, and flush that PCI write
5168                  */
5169                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5170                                0x00000000);
5171         }
5172 out:
5173         return IRQ_RETVAL(handled);
5174 }
5175
5176 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5177 {
5178         struct tg3_napi *tnapi = dev_id;
5179         struct tg3 *tp = tnapi->tp;
5180         struct tg3_hw_status *sblk = tnapi->hw_status;
5181         unsigned int handled = 1;
5182
5183         /* In INTx mode, it is possible for the interrupt to arrive at
5184          * the CPU before the status block, which was posted prior to the
5185          * interrupt, is visible.  Reading the PCI State register will
5186          * confirm whether the interrupt is ours and will flush the status
5187          * block.
5187          */
5188         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5189                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5190                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5191                         handled = 0;
5192                         goto out;
5193                 }
5194         }
5195
5196         /*
5197          * Writing any value to intr-mbox-0 clears PCI INTA# and
5198          * chip-internal interrupt pending events.
5199          * Writing non-zero to intr-mbox-0 additionally tells the
5200          * NIC to stop sending us irqs, engaging "in-intr-handler"
5201          * event coalescing.
5202          *
5203          * Flush the mailbox to de-assert the IRQ immediately to prevent
5204          * spurious interrupts.  The flush impacts performance but
5205          * excessive spurious interrupts can be worse in some cases.
5206          */
5207         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5208
5209         /*
5210          * In a shared interrupt configuration, sometimes other devices'
5211          * interrupts will scream.  We record the current status tag here
5212          * so that the above check can report that the screaming interrupts
5213          * are unhandled.  Eventually they will be silenced.
5214          */
5215         tnapi->last_irq_tag = sblk->status_tag;
5216
5217         if (tg3_irq_sync(tp))
5218                 goto out;
5219
5220         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5221
5222         napi_schedule(&tnapi->napi);
5223
5224 out:
5225         return IRQ_RETVAL(handled);
5226 }
5227
5228 /* ISR for interrupt test */
5229 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5230 {
5231         struct tg3_napi *tnapi = dev_id;
5232         struct tg3 *tp = tnapi->tp;
5233         struct tg3_hw_status *sblk = tnapi->hw_status;
5234
5235         if ((sblk->status & SD_STATUS_UPDATED) ||
5236             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5237                 tg3_disable_ints(tp);
5238                 return IRQ_RETVAL(1);
5239         }
5240         return IRQ_RETVAL(0);
5241 }
5242
5243 static int tg3_init_hw(struct tg3 *, int);
5244 static int tg3_halt(struct tg3 *, int, int);
5245
5246 /* Restart hardware after configuration changes, self-test, etc.
5247  * Invoked with tp->lock held.
5248  */
5249 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5250         __releases(tp->lock)
5251         __acquires(tp->lock)
5252 {
5253         int err;
5254
5255         err = tg3_init_hw(tp, reset_phy);
5256         if (err) {
5257                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5258                        "aborting.\n", tp->dev->name);
5259                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5260                 tg3_full_unlock(tp);
5261                 del_timer_sync(&tp->timer);
5262                 tp->irq_sync = 0;
5263                 tg3_napi_enable(tp);
5264                 dev_close(tp->dev);
5265                 tg3_full_lock(tp, 0);
5266         }
5267         return err;
5268 }
5269
5270 #ifdef CONFIG_NET_POLL_CONTROLLER
5271 static void tg3_poll_controller(struct net_device *dev)
5272 {
5273         int i;
5274         struct tg3 *tp = netdev_priv(dev);
5275
5276         for (i = 0; i < tp->irq_cnt; i++)
5277                 tg3_interrupt(tp->napi[i].irq_vec, dev);
5278 }
5279 #endif
5280
5281 static void tg3_reset_task(struct work_struct *work)
5282 {
5283         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5284         int err;
5285         unsigned int restart_timer;
5286
5287         tg3_full_lock(tp, 0);
5288
5289         if (!netif_running(tp->dev)) {
5290                 tg3_full_unlock(tp);
5291                 return;
5292         }
5293
5294         tg3_full_unlock(tp);
5295
5296         tg3_phy_stop(tp);
5297
5298         tg3_netif_stop(tp);
5299
5300         tg3_full_lock(tp, 1);
5301
5302         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5303         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5304
5305         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5306                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5307                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5308                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5309                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5310         }
5311
5312         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5313         err = tg3_init_hw(tp, 1);
5314         if (err)
5315                 goto out;
5316
5317         tg3_netif_start(tp);
5318
5319         if (restart_timer)
5320                 mod_timer(&tp->timer, jiffies + 1);
5321
5322 out:
5323         tg3_full_unlock(tp);
5324
5325         if (!err)
5326                 tg3_phy_start(tp);
5327 }
5328
5329 static void tg3_dump_short_state(struct tg3 *tp)
5330 {
5331         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5332                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5333         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5334                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5335 }
5336
5337 static void tg3_tx_timeout(struct net_device *dev)
5338 {
5339         struct tg3 *tp = netdev_priv(dev);
5340
5341         if (netif_msg_tx_err(tp)) {
5342                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5343                        dev->name);
5344                 tg3_dump_short_state(tp);
5345         }
5346
5347         schedule_work(&tp->reset_task);
5348 }
5349
5350 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5351 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5352 {
5353         u32 base = (u32) mapping & 0xffffffff;
5354
5355         return ((base > 0xffffdcc0) &&
5356                 (base + len + 8 < base));
5357 }
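/* Worked example (illustrative): mapping == 0xfffffff0 and len == 0x20
 * give base + len + 8 == 0x100000018, which truncates to 0x18 < base,
 * so the buffer straddles a 4GB boundary.  The 0xffffdcc0 pre-check
 * (4GB - 9024) cheaply rejects buffers that cannot reach a boundary
 * even at maximum frame size.
 */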
5358
5359 /* Test for DMA addresses > 40-bit */
5360 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5361                                           int len)
5362 {
5363 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5364         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5365                 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5366         return 0;
5367 #else
5368         return 0;
5369 #endif
5370 }
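/* Worked example (illustrative): DMA_BIT_MASK(40) == 0xffffffffff, so a
 * mapping of 0xfffffff000 with len == 0x2000 ends past the 40-bit limit
 * and triggers the workaround on affected chips.
 */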
5371
5372 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5373
5374 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5375 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5376                                        struct sk_buff *skb, u32 last_plus_one,
5377                                        u32 *start, u32 base_flags, u32 mss)
5378 {
5379         struct tg3 *tp = tnapi->tp;
5380         struct sk_buff *new_skb;
5381         dma_addr_t new_addr = 0;
5382         u32 entry = *start;
5383         int i, ret = 0;
5384
5385         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
5386                 new_skb = skb_copy(skb, GFP_ATOMIC);
5387         } else {
5388                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5389
5390                 new_skb = skb_copy_expand(skb,
5391                                           skb_headroom(skb) + more_headroom,
5392                                           skb_tailroom(skb), GFP_ATOMIC);
5393         }
5394
5395         if (!new_skb) {
5396                 ret = -1;
5397         } else {
5398                 /* New SKB is guaranteed to be linear. */
5399                 entry = *start;
5400                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5401                                           PCI_DMA_TODEVICE);
5402                 /* Make sure the mapping succeeded */
5403                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5404                         ret = -1;
5405                         dev_kfree_skb(new_skb);
5406                         new_skb = NULL;
5407
5408                 /* Make sure new skb does not cross any 4G boundaries.
5409                  * Drop the packet if it does.
5410                  */
5411                 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5412                             tg3_4g_overflow_test(new_addr, new_skb->len)) {
5413                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5414                                          PCI_DMA_TODEVICE);
5415                         ret = -1;
5416                         dev_kfree_skb(new_skb);
5417                         new_skb = NULL;
5418                 } else {
5419                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5420                                     base_flags, 1 | (mss << 1));
5421                         *start = NEXT_TX(entry);
5422                 }
5423         }
5424
5425         /* Now clean up the sw ring entries. */
5426         i = 0;
5427         while (entry != last_plus_one) {
5428                 int len;
5429
5430                 if (i == 0)
5431                         len = skb_headlen(skb);
5432                 else
5433                         len = skb_shinfo(skb)->frags[i-1].size;
5434
5435                 pci_unmap_single(tp->pdev,
5436                                  pci_unmap_addr(&tnapi->tx_buffers[entry],
5437                                                 mapping),
5438                                  len, PCI_DMA_TODEVICE);
5439                 if (i == 0) {
5440                         tnapi->tx_buffers[entry].skb = new_skb;
5441                         pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5442                                            new_addr);
5443                 } else {
5444                         tnapi->tx_buffers[entry].skb = NULL;
5445                 }
5446                 entry = NEXT_TX(entry);
5447                 i++;
5448         }
5449
5450         dev_kfree_skb(skb);
5451
5452         return ret;
5453 }
5454
5455 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5456                         dma_addr_t mapping, int len, u32 flags,
5457                         u32 mss_and_is_end)
5458 {
5459         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5460         int is_end = (mss_and_is_end & 0x1);
5461         u32 mss = (mss_and_is_end >> 1);
5462         u32 vlan_tag = 0;
5463
5464         if (is_end)
5465                 flags |= TXD_FLAG_END;
5466         if (flags & TXD_FLAG_VLAN) {
5467                 vlan_tag = flags >> 16;
5468                 flags &= 0xffff;
5469         }
5470         vlan_tag |= (mss << TXD_MSS_SHIFT);
5471
5472         txd->addr_hi = ((u64) mapping >> 32);
5473         txd->addr_lo = ((u64) mapping & 0xffffffff);
5474         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5475         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5476 }
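/* Packing example (from the callers above and below): mss_and_is_end is
 * built as (mss << 1) | is_end.  tg3_start_xmit() passes
 * (skb_shinfo(skb)->nr_frags == 0) | (mss << 1), so only a packet's
 * last descriptor carries TXD_FLAG_END.
 */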
5477
5478 /* hard_start_xmit for devices that don't have any bugs and
5479  * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5480  */
5481 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5482                                   struct net_device *dev)
5483 {
5484         struct tg3 *tp = netdev_priv(dev);
5485         u32 len, entry, base_flags, mss;
5486         dma_addr_t mapping;
5487         struct tg3_napi *tnapi;
5488         struct netdev_queue *txq;
5489         unsigned int i, last;
5490
5492         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5493         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5494         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5495                 tnapi++;
5496
5497         /* We are running in BH disabled context with netif_tx_lock
5498          * and TX reclaim runs via tp->napi.poll inside of a software
5499          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5500          * no IRQ context deadlocks to worry about either.  Rejoice!
5501          */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	mss = 0;
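	/* TSO setup: the hardware slices the packet into MSS-sized
	 * segments itself, so the IP total length is rewritten to cover a
	 * single segment and the TCP checksum is cleared (the HW_TSO_2 /
	 * HW_TSO_3 chips this routine serves appear to regenerate the
	 * pseudo-header checksum on their own).  On HW_TSO_3 parts the
	 * header length is additionally encoded into the mss field and
	 * base_flags, as done below.
	 */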
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;
		u32 hdrlen;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		} else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}

		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else {
			mss |= hdrlen << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	}
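	/* The VLAN tag travels in the upper 16 bits of base_flags;
	 * tg3_set_txd() peels it back out when TXD_FLAG_VLAN is set.
	 */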
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	len = skb_headlen(skb);

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

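	/* On chips with TG3_FLG3_USE_JUMBO_BDFLAG, non-TSO frames larger
	 * than a standard Ethernet payload apparently must be flagged as
	 * jumbo in the descriptor.
	 */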
	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > ETH_DATA_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			tnapi->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);

			tg3_set_txd(tnapi, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
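	/* If fewer than MAX_SKB_FRAGS + 1 descriptors remain, the next
	 * worst-case packet might not fit, so stop the queue.  The recheck
	 * against TG3_TX_WAKEUP_THRESH() closes a race with the reclaim
	 * path: if tx completion freed enough entries between the test and
	 * the stop, wake the queue straight back up.
	 */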
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

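	/* Unwind after a failed pci_map_page(): unmap the linear head at
	 * the original producer slot, then each fragment that was actually
	 * mapped.  Note frag "last" is the one whose mapping failed and was
	 * never stored, so the loop stops short of it.
	 */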
dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i < last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       pci_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
					  struct net_device *);

/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

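	/* Software GSO fallback: segment the skb into MSS-sized packets
	 * with TSO masked out of the advertised features, then feed each
	 * resulting skb through the normal transmit routine.
	 */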
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		u32 tcp_opt_len, ip_tcp_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
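		/* HW_TSO chips regenerate the TCP checksum themselves, so
		 * it is simply cleared; with firmware TSO the pseudo-header
		 * checksum is seeded here, presumably so the firmware only
		 * has to fold in the payload.
		 */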
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		}

		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)