/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define DRV_MODULE_VERSION      "3.108"
#define DRV_MODULE_RELDATE      "February 17, 2010"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE 128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
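/* For example, since TG3_TX_RING_SIZE below is 512 (a power of two),
 *
 *      (idx + 1) % TG3_TX_RING_SIZE
 * and
 *      (idx + 1) & (TG3_TX_RING_SIZE - 1)
 *
 * compute the same value; the NEXT_TX() macro below uses the mask form
 * directly, so no hardware divide is ever emitted.
 */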
#define TG3_RX_RCB_RING_SIZE(tp)        \
        (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
        (sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
        (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
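/* With the default tx_pending of TG3_DEF_TX_RING_PENDING (511), for
 * example, this threshold works out to 511 / 4 = 127 free descriptors.
 */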

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS       2

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND)) {
                /* Non-posted methods */
                tp->write32(tp, off, val);
        } else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
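
/* Usage sketch (illustrative, not from the original source): a caller
 * toggling GPIOs through GRC_LOCAL_CTRL with a 100 usec settle time
 * would go through the tw32_wait_f() wrapper defined below, e.g.
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | gpio_bits, 100);
 *
 * where gpio_bits is a hypothetical GPIO mask.  This expands to
 * _tw32_flush(tp, GRC_LOCAL_CTRL, ..., 100), which enforces the delay
 * on both the posted and non-posted paths.
 */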

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg, val)          tp->write32(tp, reg, val)
#define tw32_f(reg, val)        _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)               tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
        int i;

        for (i = tp->irq_cnt - 1; i >= 0; i--)
                napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
        int i;

        for (i = 0; i < tp->irq_cnt; i++)
                napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                u32 funcnum, is_serdes;

                funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
                if (funcnum)
                        tp->phy_addr = 2;
                else
                        tp->phy_addr = 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                netdev_warn(tp->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
                break;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

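        /* Each iteration below polls once and then waits 8 usec, so
         * delay_cnt iterations cover roughly the usecs remaining in the
         * firmware event window (the >> 3 above divides by 8, and the
         * + 1 rounds up).
         */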
        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");
                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

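/* Resolve the negotiated flow control for a 1000BASE-X link from the
 * local and link-partner pause advertisements.  The cases below follow
 * the usual IEEE 802.3 pause resolution rules: symmetric pause on both
 * ends enables flow control in both directions, while the asymmetric
 * pause bits allow one-way (RX-only here, or TX-only toward the
 * partner) flow control.
 */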
1305 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1306 {
1307         u8 cap = 0;
1308
1309         if (lcladv & ADVERTISE_1000XPAUSE) {
1310                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1311                         if (rmtadv & LPA_1000XPAUSE)
1312                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1313                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1314                                 cap = FLOW_CTRL_RX;
1315                 } else {
1316                         if (rmtadv & LPA_1000XPAUSE)
1317                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1318                 }
1319         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1320                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1321                         cap = FLOW_CTRL_TX;
1322         }
1323
1324         return cap;
1325 }
1326
1327 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1328 {
1329         u8 autoneg;
1330         u8 flowctrl = 0;
1331         u32 old_rx_mode = tp->rx_mode;
1332         u32 old_tx_mode = tp->tx_mode;
1333
1334         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1335                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1336         else
1337                 autoneg = tp->link_config.autoneg;
1338
1339         if (autoneg == AUTONEG_ENABLE &&
1340             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1341                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1342                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1343                 else
1344                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1345         } else
1346                 flowctrl = tp->link_config.flowctrl;
1347
1348         tp->link_config.active_flowctrl = flowctrl;
1349
1350         if (flowctrl & FLOW_CTRL_RX)
1351                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1352         else
1353                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1354
1355         if (old_rx_mode != tp->rx_mode)
1356                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1357
1358         if (flowctrl & FLOW_CTRL_TX)
1359                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1360         else
1361                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1362
1363         if (old_tx_mode != tp->tx_mode)
1364                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1365 }
1366
1367 static void tg3_adjust_link(struct net_device *dev)
1368 {
1369         u8 oldflowctrl, linkmesg = 0;
1370         u32 mac_mode, lcl_adv, rmt_adv;
1371         struct tg3 *tp = netdev_priv(dev);
1372         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1373
1374         spin_lock_bh(&tp->lock);
1375
1376         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1377                                     MAC_MODE_HALF_DUPLEX);
1378
1379         oldflowctrl = tp->link_config.active_flowctrl;
1380
1381         if (phydev->link) {
1382                 lcl_adv = 0;
1383                 rmt_adv = 0;
1384
1385                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1386                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1387                 else if (phydev->speed == SPEED_1000 ||
1388                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1389                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1390                 else
1391                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1392
1393                 if (phydev->duplex == DUPLEX_HALF)
1394                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1395                 else {
1396                         lcl_adv = tg3_advert_flowctrl_1000T(
1397                                   tp->link_config.flowctrl);
1398
1399                         if (phydev->pause)
1400                                 rmt_adv = LPA_PAUSE_CAP;
1401                         if (phydev->asym_pause)
1402                                 rmt_adv |= LPA_PAUSE_ASYM;
1403                 }
1404
1405                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1406         } else
1407                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1408
1409         if (mac_mode != tp->mac_mode) {
1410                 tp->mac_mode = mac_mode;
1411                 tw32_f(MAC_MODE, tp->mac_mode);
1412                 udelay(40);
1413         }
1414
1415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1416                 if (phydev->speed == SPEED_10)
1417                         tw32(MAC_MI_STAT,
1418                              MAC_MI_STAT_10MBPS_MODE |
1419                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1420                 else
1421                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1422         }
1423
1424         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1425                 tw32(MAC_TX_LENGTHS,
1426                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1427                       (6 << TX_LENGTHS_IPG_SHIFT) |
1428                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1429         else
1430                 tw32(MAC_TX_LENGTHS,
1431                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1432                       (6 << TX_LENGTHS_IPG_SHIFT) |
1433                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1434
1435         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1436             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1437             phydev->speed != tp->link_config.active_speed ||
1438             phydev->duplex != tp->link_config.active_duplex ||
1439             oldflowctrl != tp->link_config.active_flowctrl)
1440             linkmesg = 1;
1441
1442         tp->link_config.active_speed = phydev->speed;
1443         tp->link_config.active_duplex = phydev->duplex;
1444
1445         spin_unlock_bh(&tp->lock);
1446
1447         if (linkmesg)
1448                 tg3_link_report(tp);
1449 }
1450
1451 static int tg3_phy_init(struct tg3 *tp)
1452 {
1453         struct phy_device *phydev;
1454
1455         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1456                 return 0;
1457
1458         /* Bring the PHY back to a known state. */
1459         tg3_bmcr_reset(tp);
1460
1461         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1462
1463         /* Attach the MAC to the PHY. */
1464         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1465                              phydev->dev_flags, phydev->interface);
1466         if (IS_ERR(phydev)) {
1467                 netdev_err(tp->dev, "Could not attach to PHY\n");
1468                 return PTR_ERR(phydev);
1469         }
1470
1471         /* Mask with MAC supported features. */
1472         switch (phydev->interface) {
1473         case PHY_INTERFACE_MODE_GMII:
1474         case PHY_INTERFACE_MODE_RGMII:
1475                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1476                         phydev->supported &= (PHY_GBIT_FEATURES |
1477                                               SUPPORTED_Pause |
1478                                               SUPPORTED_Asym_Pause);
1479                         break;
1480                 }
1481                 /* fallthru */
1482         case PHY_INTERFACE_MODE_MII:
1483                 phydev->supported &= (PHY_BASIC_FEATURES |
1484                                       SUPPORTED_Pause |
1485                                       SUPPORTED_Asym_Pause);
1486                 break;
1487         default:
1488                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1489                 return -EINVAL;
1490         }
1491
1492         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1493
1494         phydev->advertising = phydev->supported;
1495
1496         return 0;
1497 }
1498
1499 static void tg3_phy_start(struct tg3 *tp)
1500 {
1501         struct phy_device *phydev;
1502
1503         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1504                 return;
1505
1506         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1507
1508         if (tp->link_config.phy_is_low_power) {
1509                 tp->link_config.phy_is_low_power = 0;
1510                 phydev->speed = tp->link_config.orig_speed;
1511                 phydev->duplex = tp->link_config.orig_duplex;
1512                 phydev->autoneg = tp->link_config.orig_autoneg;
1513                 phydev->advertising = tp->link_config.orig_advertising;
1514         }
1515
1516         phy_start(phydev);
1517
1518         phy_start_aneg(phydev);
1519 }
1520
1521 static void tg3_phy_stop(struct tg3 *tp)
1522 {
1523         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1524                 return;
1525
1526         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1527 }
1528
1529 static void tg3_phy_fini(struct tg3 *tp)
1530 {
1531         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1532                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1533                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1534         }
1535 }
1536
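/* DSP registers are reached indirectly: write the register number to
 * the DSP address register, then move the value through the read/write
 * port.
 */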
1537 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1538 {
1539         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1540         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1541 }
1542
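/* Toggle auto power-down (APD) on FET-style PHYs.  The control bit sits
 * in a shadow bank that is only visible while the shadow-enable bit of
 * the test register is set, so expose the bank, update AUXSTAT2, then
 * restore the original test register value.
 */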
1543 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1544 {
1545         u32 phytest;
1546
1547         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1548                 u32 phy;
1549
1550                 tg3_writephy(tp, MII_TG3_FET_TEST,
1551                              phytest | MII_TG3_FET_SHADOW_EN);
1552                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1553                         if (enable)
1554                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1555                         else
1556                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1557                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1558                 }
1559                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1560         }
1561 }
1562
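/* Toggle auto power-down for standard PHYs through the misc shadow
 * register: program the SCR5 power-saving controls first, then write
 * the APD enable together with an 84 ms wake timer.
 */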
1563 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1564 {
1565         u32 reg;
1566
1567         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1568             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1569              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1570                 return;
1571
1572         if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1573                 tg3_phy_fet_toggle_apd(tp, enable);
1574                 return;
1575         }
1576
1577         reg = MII_TG3_MISC_SHDW_WREN |
1578               MII_TG3_MISC_SHDW_SCR5_SEL |
1579               MII_TG3_MISC_SHDW_SCR5_LPED |
1580               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1581               MII_TG3_MISC_SHDW_SCR5_SDTL |
1582               MII_TG3_MISC_SHDW_SCR5_C125OE;
1583         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1584                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1585
1586         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1587
1589         reg = MII_TG3_MISC_SHDW_WREN |
1590               MII_TG3_MISC_SHDW_APD_SEL |
1591               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1592         if (enable)
1593                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1594
1595         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1596 }
1597
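/* Turn automatic MDI/MDI-X crossover on or off.  Serdes devices have no
 * copper pairs to swap, so they return early, and FET-style PHYs keep
 * the control bit in a shadow register.
 */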
1598 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1599 {
1600         u32 phy;
1601
1602         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1603             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1604                 return;
1605
1606         if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1607                 u32 ephy;
1608
1609                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1610                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1611
1612                         tg3_writephy(tp, MII_TG3_FET_TEST,
1613                                      ephy | MII_TG3_FET_SHADOW_EN);
1614                         if (!tg3_readphy(tp, reg, &phy)) {
1615                                 if (enable)
1616                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1617                                 else
1618                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1619                                 tg3_writephy(tp, reg, phy);
1620                         }
1621                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1622                 }
1623         } else {
1624                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1625                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1626                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1627                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1628                         if (enable)
1629                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1630                         else
1631                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1632                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1633                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1634                 }
1635         }
1636 }
1637
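/* Enable ethernet@wirespeed: let the PHY fall back to a lower speed
 * when the cabling cannot sustain a gigabit link.  The 0x7007 write
 * selects the vendor-specific auxiliary-control shadow register; bit 15
 * is the write-enable and bit 4 the wirespeed enable.
 */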
1638 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1639 {
1640         u32 val;
1641
1642         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1643                 return;
1644
1645         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1646             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1647                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1648                              (val | (1 << 15) | (1 << 4)));
1649 }
1650
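/* Program the PHY DSP taps from the one-time-programmable (OTP) word
 * saved in tp->phy_otp: each bit field is extracted, shifted into place
 * and written to its DSP register with the SM_DSP clock enabled.
 */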
1651 static void tg3_phy_apply_otp(struct tg3 *tp)
1652 {
1653         u32 otp, phy;
1654
1655         if (!tp->phy_otp)
1656                 return;
1657
1658         otp = tp->phy_otp;
1659
1660         /* Enable SM_DSP clock and tx 6dB coding. */
1661         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1662               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1663               MII_TG3_AUXCTL_ACTL_TX_6DB;
1664         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1665
1666         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1667         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1668         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1669
1670         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1671               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1672         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1673
1674         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1675         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1676         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1677
1678         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1679         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1680
1681         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1682         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1683
1684         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1685               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1686         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1687
1688         /* Turn off SM_DSP clock. */
1689         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1690               MII_TG3_AUXCTL_ACTL_TX_6DB;
1691         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1692 }
1693
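/* Poll PHY register 0x16 until the DSP macro-busy bit (0x1000) clears,
 * giving up after 100 reads so a wedged PHY returns -EBUSY instead of
 * hanging.
 */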
1694 static int tg3_wait_macro_done(struct tg3 *tp)
1695 {
1696         int limit = 100;
1697
1698         while (limit--) {
1699                 u32 tmp32;
1700
1701                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1702                         if ((tmp32 & 0x1000) == 0)
1703                                 break;
1704                 }
1705         }
1706         if (limit < 0)
1707                 return -EBUSY;
1708
1709         return 0;
1710 }
1711
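/* Write a known test pattern into each of the four DSP channels, then
 * read it back and compare.  Any macro timeout or mismatch requests
 * another PHY reset through *resetp.
 */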
1712 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1713 {
1714         static const u32 test_pat[4][6] = {
1715         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1716         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1717         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1718         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1719         };
1720         int chan;
1721
1722         for (chan = 0; chan < 4; chan++) {
1723                 int i;
1724
1725                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1726                              (chan * 0x2000) | 0x0200);
1727                 tg3_writephy(tp, 0x16, 0x0002);
1728
1729                 for (i = 0; i < 6; i++)
1730                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1731                                      test_pat[chan][i]);
1732
1733                 tg3_writephy(tp, 0x16, 0x0202);
1734                 if (tg3_wait_macro_done(tp)) {
1735                         *resetp = 1;
1736                         return -EBUSY;
1737                 }
1738
1739                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1740                              (chan * 0x2000) | 0x0200);
1741                 tg3_writephy(tp, 0x16, 0x0082);
1742                 if (tg3_wait_macro_done(tp)) {
1743                         *resetp = 1;
1744                         return -EBUSY;
1745                 }
1746
1747                 tg3_writephy(tp, 0x16, 0x0802);
1748                 if (tg3_wait_macro_done(tp)) {
1749                         *resetp = 1;
1750                         return -EBUSY;
1751                 }
1752
1753                 for (i = 0; i < 6; i += 2) {
1754                         u32 low, high;
1755
1756                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1757                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1758                             tg3_wait_macro_done(tp)) {
1759                                 *resetp = 1;
1760                                 return -EBUSY;
1761                         }
1762                         low &= 0x7fff;
1763                         high &= 0x000f;
1764                         if (low != test_pat[chan][i] ||
1765                             high != test_pat[chan][i+1]) {
1766                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1767                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1768                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1769
1770                                 return -EBUSY;
1771                         }
1772                 }
1773         }
1774
1775         return 0;
1776 }
1777
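/* Zero out the test pattern in all four DSP channels. */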
1778 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1779 {
1780         int chan;
1781
1782         for (chan = 0; chan < 4; chan++) {
1783                 int i;
1784
1785                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1786                              (chan * 0x2000) | 0x0200);
1787                 tg3_writephy(tp, 0x16, 0x0002);
1788                 for (i = 0; i < 6; i++)
1789                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1790                 tg3_writephy(tp, 0x16, 0x0202);
1791                 if (tg3_wait_macro_done(tp))
1792                         return -EBUSY;
1793         }
1794
1795         return 0;
1796 }
1797
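/* Workaround for 5703/5704/5705 PHYs with a confused DSP: reset the
 * PHY, force 1000/full master mode, then rewrite and verify the channel
 * test patterns, retrying the whole sequence up to ten times.
 */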
1798 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1799 {
1800         u32 reg32, phy9_orig;
1801         int retries, do_phy_reset, err;
1802
1803         retries = 10;
1804         do_phy_reset = 1;
1805         do {
1806                 if (do_phy_reset) {
1807                         err = tg3_bmcr_reset(tp);
1808                         if (err)
1809                                 return err;
1810                         do_phy_reset = 0;
1811                 }
1812
1813                 /* Disable transmitter and interrupt.  */
1814                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1815                         continue;
1816
1817                 reg32 |= 0x3000;
1818                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1819
1820                 /* Set full-duplex, 1000 Mbps.  */
1821                 tg3_writephy(tp, MII_BMCR,
1822                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1823
1824                 /* Set to master mode.  */
1825                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1826                         continue;
1827
1828                 tg3_writephy(tp, MII_TG3_CTRL,
1829                              (MII_TG3_CTRL_AS_MASTER |
1830                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1831
1832                 /* Enable SM_DSP_CLOCK and 6dB.  */
1833                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1834
1835                 /* Block the PHY control access.  */
1836                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1837                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1838
1839                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1840                 if (!err)
1841                         break;
1842         } while (--retries);
1843
1844         err = tg3_phy_reset_chanpat(tp);
1845         if (err)
1846                 return err;
1847
1848         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1849         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1850
1851         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1852         tg3_writephy(tp, 0x16, 0x0000);
1853
1854         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1855             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1856                 /* Set Extended packet length bit for jumbo frames */
1857                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1858         } else {
1859                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860         }
1862
1863         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1864
1865         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1866                 reg32 &= ~0x3000;
1867                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1868         } else if (!err)
1869                 err = -EBUSY;
1870
1871         return err;
1872 }
1873
1874 /* This will reset the tigon3 PHY back to a known good state and
1875  * reapply the chip-specific workarounds that the reset wipes out.
1876  */
1877 static int tg3_phy_reset(struct tg3 *tp)
1878 {
1879         u32 cpmuctrl;
1880         u32 phy_status;
1881         int err;
1882
1883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1884                 u32 val;
1885
1886                 val = tr32(GRC_MISC_CFG);
1887                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1888                 udelay(40);
1889         }
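        /* The BMSR link bit is latched-low, so read the register twice;
         * the second read returns the current link state.
         */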
1890         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1891         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1892         if (err != 0)
1893                 return -EBUSY;
1894
1895         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1896                 netif_carrier_off(tp->dev);
1897                 tg3_link_report(tp);
1898         }
1899
1900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1902             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1903                 err = tg3_phy_reset_5703_4_5(tp);
1904                 if (err)
1905                         return err;
1906                 goto out;
1907         }
1908
1909         cpmuctrl = 0;
1910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1911             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1912                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1913                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1914                         tw32(TG3_CPMU_CTRL,
1915                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1916         }
1917
1918         err = tg3_bmcr_reset(tp);
1919         if (err)
1920                 return err;
1921
1922         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1923                 u32 phy;
1924
1925                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1926                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1927
1928                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1929         }
1930
1931         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1932             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1933                 u32 val;
1934
1935                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1936                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1937                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1938                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1939                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1940                         udelay(40);
1941                 }
1942         }
1943
1944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
1945             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1946                 return 0;
1947
1948         tg3_phy_apply_otp(tp);
1949
1950         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1951                 tg3_phy_toggle_apd(tp, true);
1952         else
1953                 tg3_phy_toggle_apd(tp, false);
1954
1955 out:
1956         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1957                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1958                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1959                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1960                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1961                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1963         }
1964         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1965                 tg3_writephy(tp, 0x1c, 0x8d68);
1966                 tg3_writephy(tp, 0x1c, 0x8d68);
1967         }
1968         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1969                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1970                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1971                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1972                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1973                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1974                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1975                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1976                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1977         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1979                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1981                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1982                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1983                         tg3_writephy(tp, MII_TG3_TEST1,
1984                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1985                 } else
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1987                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1988         }
1989         /* Set Extended packet length bit (bit 14) on all chips that
1990          * support jumbo frames. */
1991         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1992                 /* Cannot do read-modify-write on 5401 */
1993                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1994         } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1995                 u32 phy_reg;
1996
1997                 /* Set bit 14 with read-modify-write to preserve other bits */
1998                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1999                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
2000                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
2001         }
2002
2003         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2004          * jumbo frames transmission.
2005          */
2006         if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2007                 u32 phy_reg;
2008
2009                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2010                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2011                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2012         }
2013
2014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2015                 /* adjust output voltage */
2016                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2017         }
2018
2019         tg3_phy_toggle_automdix(tp, 1);
2020         tg3_phy_set_wirespeed(tp);
2021         return 0;
2022 }
2023
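/* Set up the GPIOs that switch auxiliary (Vaux) power.  On multi-port
 * devices the GPIOs are shared, so the peer's WOL/ASF configuration
 * also decides how they may be driven.
 */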
2024 static void tg3_frob_aux_power(struct tg3 *tp)
2025 {
2026         struct tg3 *tp_peer = tp;
2027
2028         /* The GPIOs do something completely different on 57765. */
2029         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2030             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2031                 return;
2032
2033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2036                 struct net_device *dev_peer;
2037
2038                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2039                 /* remove_one() may have been run on the peer. */
2040                 if (!dev_peer)
2041                         tp_peer = tp;
2042                 else
2043                         tp_peer = netdev_priv(dev_peer);
2044         }
2045
2046         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2047             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2048             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2049             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2051                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2052                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2053                                     (GRC_LCLCTRL_GPIO_OE0 |
2054                                      GRC_LCLCTRL_GPIO_OE1 |
2055                                      GRC_LCLCTRL_GPIO_OE2 |
2056                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2057                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2058                                     100);
2059                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2060                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2061                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2062                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2063                                              GRC_LCLCTRL_GPIO_OE1 |
2064                                              GRC_LCLCTRL_GPIO_OE2 |
2065                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2066                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2067                                              tp->grc_local_ctrl;
2068                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2069
2070                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2071                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2072
2073                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2074                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2075                 } else {
2076                         u32 no_gpio2;
2077                         u32 grc_local_ctrl = 0;
2078
2079                         if (tp_peer != tp &&
2080                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2081                                 return;
2082
2083                         /* Workaround to prevent overdrawing current. */
2084                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2085                             ASIC_REV_5714) {
2086                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2087                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2088                                             grc_local_ctrl, 100);
2089                         }
2090
2091                         /* On 5753 and variants, GPIO2 cannot be used. */
2092                         no_gpio2 = tp->nic_sram_data_cfg &
2093                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2094
2095                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2096                                          GRC_LCLCTRL_GPIO_OE1 |
2097                                          GRC_LCLCTRL_GPIO_OE2 |
2098                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2099                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2100                         if (no_gpio2) {
2101                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2102                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2103                         }
2104                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2105                                                     grc_local_ctrl, 100);
2106
2107                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2108
2109                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2110                                                     grc_local_ctrl, 100);
2111
2112                         if (!no_gpio2) {
2113                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2114                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2115                                             grc_local_ctrl, 100);
2116                         }
2117                 }
2118         } else {
2119                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2120                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2121                         if (tp_peer != tp &&
2122                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2123                                 return;
2124
2125                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2126                                     (GRC_LCLCTRL_GPIO_OE1 |
2127                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2128
2129                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2130                                     GRC_LCLCTRL_GPIO_OE1, 100);
2131
2132                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2133                                     (GRC_LCLCTRL_GPIO_OE1 |
2134                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2135                 }
2136         }
2137 }
2138
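/* Report whether MAC_MODE_LINK_POLARITY must be set at the given speed;
 * the polarity sense depends on the LED mode and, for the BCM5411, on
 * the link speed.
 */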
2139 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2140 {
2141         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2142                 return 1;
2143         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2144                 if (speed != SPEED_10)
2145                         return 1;
2146         } else if (speed == SPEED_10)
2147                 return 1;
2148
2149         return 0;
2150 }
2151
2152 static int tg3_setup_phy(struct tg3 *, int);
2153
2154 #define RESET_KIND_SHUTDOWN     0
2155 #define RESET_KIND_INIT         1
2156 #define RESET_KIND_SUSPEND      2
2157
2158 static void tg3_write_sig_post_reset(struct tg3 *, int);
2159 static int tg3_halt_cpu(struct tg3 *, u32);
2160
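/* Put the PHY into its lowest usable power state.  Serdes, 5906 and
 * FET-style devices each need a different sequence, and a few chips
 * must not have their PHY powered down at all because of hardware bugs.
 */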
2161 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2162 {
2163         u32 val;
2164
2165         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2167                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2168                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2169
2170                         sg_dig_ctrl |=
2171                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2172                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2173                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2174                 }
2175                 return;
2176         }
2177
2178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2179                 tg3_bmcr_reset(tp);
2180                 val = tr32(GRC_MISC_CFG);
2181                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2182                 udelay(40);
2183                 return;
2184         } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2185                 u32 phytest;
2186                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2187                         u32 phy;
2188
2189                         tg3_writephy(tp, MII_ADVERTISE, 0);
2190                         tg3_writephy(tp, MII_BMCR,
2191                                      BMCR_ANENABLE | BMCR_ANRESTART);
2192
2193                         tg3_writephy(tp, MII_TG3_FET_TEST,
2194                                      phytest | MII_TG3_FET_SHADOW_EN);
2195                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2196                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2197                                 tg3_writephy(tp,
2198                                              MII_TG3_FET_SHDW_AUXMODE4,
2199                                              phy);
2200                         }
2201                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2202                 }
2203                 return;
2204         } else if (do_low_power) {
2205                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2206                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2207
2208                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2209                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2210                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2211                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2212                              MII_TG3_AUXCTL_PCTL_VREG_11V);
2213         }
2214
2215         /* The PHY should not be powered down on some chips because
2216          * of bugs.
2217          */
2218         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2219             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2220             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2221              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2222                 return;
2223
2224         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2225             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2226                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2227                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2228                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2229                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2230         }
2231
2232         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2233 }
2234
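/* Acquire the NVRAM software-arbitration semaphore by setting the
 * request bit and polling (up to ~160 ms) for the matching grant bit.
 * The lock nests; nvram_lock_cnt tracks the recursion depth.
 */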
2235 /* tp->lock is held. */
2236 static int tg3_nvram_lock(struct tg3 *tp)
2237 {
2238         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2239                 int i;
2240
2241                 if (tp->nvram_lock_cnt == 0) {
2242                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2243                         for (i = 0; i < 8000; i++) {
2244                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2245                                         break;
2246                                 udelay(20);
2247                         }
2248                         if (i == 8000) {
2249                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2250                                 return -ENODEV;
2251                         }
2252                 }
2253                 tp->nvram_lock_cnt++;
2254         }
2255         return 0;
2256 }
2257
2258 /* tp->lock is held. */
2259 static void tg3_nvram_unlock(struct tg3 *tp)
2260 {
2261         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2262                 if (tp->nvram_lock_cnt > 0)
2263                         tp->nvram_lock_cnt--;
2264                 if (tp->nvram_lock_cnt == 0)
2265                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2266         }
2267 }
2268
2269 /* tp->lock is held. */
2270 static void tg3_enable_nvram_access(struct tg3 *tp)
2271 {
2272         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2273             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2274                 u32 nvaccess = tr32(NVRAM_ACCESS);
2275
2276                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2277         }
2278 }
2279
2280 /* tp->lock is held. */
2281 static void tg3_disable_nvram_access(struct tg3 *tp)
2282 {
2283         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2284             !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2285                 u32 nvaccess = tr32(NVRAM_ACCESS);
2286
2287                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2288         }
2289 }
2290
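/* Fallback read path for parts without a NVRAM controller: program the
 * address into GRC_EEPROM_ADDR, then poll up to ~1 s for the COMPLETE
 * bit before fetching the word from GRC_EEPROM_DATA.
 */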
2291 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2292                                         u32 offset, u32 *val)
2293 {
2294         u32 tmp;
2295         int i;
2296
2297         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2298                 return -EINVAL;
2299
2300         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2301                                         EEPROM_ADDR_DEVID_MASK |
2302                                         EEPROM_ADDR_READ);
2303         tw32(GRC_EEPROM_ADDR,
2304              tmp |
2305              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2306              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2307               EEPROM_ADDR_ADDR_MASK) |
2308              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2309
2310         for (i = 0; i < 1000; i++) {
2311                 tmp = tr32(GRC_EEPROM_ADDR);
2312
2313                 if (tmp & EEPROM_ADDR_COMPLETE)
2314                         break;
2315                 msleep(1);
2316         }
2317         if (!(tmp & EEPROM_ADDR_COMPLETE))
2318                 return -EBUSY;
2319
2320         tmp = tr32(GRC_EEPROM_DATA);
2321
2322         /*
2323          * The data will always be opposite the native endian
2324          * format.  Perform a blind byteswap to compensate.
2325          */
2326         *val = swab32(tmp);
2327
2328         return 0;
2329 }
2330
2331 #define NVRAM_CMD_TIMEOUT 10000
2332
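/* Fire one command at the NVRAM controller and busy-wait, in 10 us
 * steps, for the DONE bit.
 */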
2333 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2334 {
2335         int i;
2336
2337         tw32(NVRAM_CMD, nvram_cmd);
2338         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2339                 udelay(10);
2340                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2341                         udelay(10);
2342                         break;
2343                 }
2344         }
2345
2346         if (i == NVRAM_CMD_TIMEOUT)
2347                 return -EBUSY;
2348
2349         return 0;
2350 }
2351
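/* Translate a linear NVRAM offset into the physical address used by
 * Atmel AT45DB0x1B-style flashes, whose pages are not a power of two in
 * size: the page index moves into the high bits while the in-page
 * offset is kept.
 */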
2352 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2353 {
2354         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2355             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2356             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2357            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2358             (tp->nvram_jedecnum == JEDEC_ATMEL))
2359
2360                 addr = ((addr / tp->nvram_pagesize) <<
2361                         ATMEL_AT45DB0X1B_PAGE_POS) +
2362                        (addr % tp->nvram_pagesize);
2363
2364         return addr;
2365 }
2366
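/* Inverse of tg3_nvram_phys_addr(): fold a physical flash address back
 * into a linear offset.
 */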
2367 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2368 {
2369         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2370             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2371             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2372            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2373             (tp->nvram_jedecnum == JEDEC_ATMEL))
2374
2375                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2376                         tp->nvram_pagesize) +
2377                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2378
2379         return addr;
2380 }
2381
2382 /* NOTE: Data read in from NVRAM is byteswapped according to
2383  * the byteswapping settings for all other register accesses.
2384  * tg3 devices are BE devices, so on a BE machine, the data
2385  * returned will be exactly as it is seen in NVRAM.  On a LE
2386  * machine, the 32-bit value will be byteswapped.
2387  */
2388 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2389 {
2390         int ret;
2391
2392         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2393                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2394
2395         offset = tg3_nvram_phys_addr(tp, offset);
2396
2397         if (offset > NVRAM_ADDR_MSK)
2398                 return -EINVAL;
2399
2400         ret = tg3_nvram_lock(tp);
2401         if (ret)
2402                 return ret;
2403
2404         tg3_enable_nvram_access(tp);
2405
2406         tw32(NVRAM_ADDR, offset);
2407         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2408                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2409
2410         if (ret == 0)
2411                 *val = tr32(NVRAM_RDDATA);
2412
2413         tg3_disable_nvram_access(tp);
2414
2415         tg3_nvram_unlock(tp);
2416
2417         return ret;
2418 }
2419
2420 /* Ensures NVRAM data is in bytestream format. */
2421 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2422 {
2423         u32 v;
2424         int res = tg3_nvram_read(tp, offset, &v);
2425         if (!res)
2426                 *val = cpu_to_be32(v);
2427         return res;
2428 }
2429
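/* Load the station address into all four MAC address slots (plus the
 * twelve extended slots on 5703/5704) and seed the transmit backoff
 * generator with the byte sum of the address, so NICs on the same wire
 * are unlikely to pick identical backoff slots.
 */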
2430 /* tp->lock is held. */
2431 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2432 {
2433         u32 addr_high, addr_low;
2434         int i;
2435
2436         addr_high = ((tp->dev->dev_addr[0] << 8) |
2437                      tp->dev->dev_addr[1]);
2438         addr_low = ((tp->dev->dev_addr[2] << 24) |
2439                     (tp->dev->dev_addr[3] << 16) |
2440                     (tp->dev->dev_addr[4] <<  8) |
2441                     (tp->dev->dev_addr[5] <<  0));
2442         for (i = 0; i < 4; i++) {
2443                 if (i == 1 && skip_mac_1)
2444                         continue;
2445                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2446                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2447         }
2448
2449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2451                 for (i = 0; i < 12; i++) {
2452                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2453                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2454                 }
2455         }
2456
2457         addr_high = (tp->dev->dev_addr[0] +
2458                      tp->dev->dev_addr[1] +
2459                      tp->dev->dev_addr[2] +
2460                      tp->dev->dev_addr[3] +
2461                      tp->dev->dev_addr[4] +
2462                      tp->dev->dev_addr[5]) &
2463                 TX_BACKOFF_SEED_MASK;
2464         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2465 }
2466
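/* Move the device into the requested PCI power state: save the current
 * link settings, arm wake-on-LAN if requested, slow or gate the clocks
 * and power down the PHY as far as ASF/APE firmware allows.
 */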
2467 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2468 {
2469         u32 misc_host_ctrl;
2470         bool device_should_wake, do_low_power;
2471
2472         /* Make sure register accesses (indirect or otherwise)
2473          * will function correctly.
2474          */
2475         pci_write_config_dword(tp->pdev,
2476                                TG3PCI_MISC_HOST_CTRL,
2477                                tp->misc_host_ctrl);
2478
2479         switch (state) {
2480         case PCI_D0:
2481                 pci_enable_wake(tp->pdev, state, false);
2482                 pci_set_power_state(tp->pdev, PCI_D0);
2483
2484                 /* Switch out of Vaux if it is a NIC */
2485                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2486                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2487
2488                 return 0;
2489
2490         case PCI_D1:
2491         case PCI_D2:
2492         case PCI_D3hot:
2493                 break;
2494
2495         default:
2496                 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2497                            state);
2498                 return -EINVAL;
2499         }
2500
2501         /* Restore the CLKREQ setting. */
2502         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2503                 u16 lnkctl;
2504
2505                 pci_read_config_word(tp->pdev,
2506                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2507                                      &lnkctl);
2508                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2509                 pci_write_config_word(tp->pdev,
2510                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2511                                       lnkctl);
2512         }
2513
2514         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2515         tw32(TG3PCI_MISC_HOST_CTRL,
2516              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2517
2518         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2519                              device_may_wakeup(&tp->pdev->dev) &&
2520                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2521
2522         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2523                 do_low_power = false;
2524                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2525                     !tp->link_config.phy_is_low_power) {
2526                         struct phy_device *phydev;
2527                         u32 phyid, advertising;
2528
2529                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2530
2531                         tp->link_config.phy_is_low_power = 1;
2532
2533                         tp->link_config.orig_speed = phydev->speed;
2534                         tp->link_config.orig_duplex = phydev->duplex;
2535                         tp->link_config.orig_autoneg = phydev->autoneg;
2536                         tp->link_config.orig_advertising = phydev->advertising;
2537
2538                         advertising = ADVERTISED_TP |
2539                                       ADVERTISED_Pause |
2540                                       ADVERTISED_Autoneg |
2541                                       ADVERTISED_10baseT_Half;
2542
2543                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2544                             device_should_wake) {
2545                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2546                                         advertising |=
2547                                                 ADVERTISED_100baseT_Half |
2548                                                 ADVERTISED_100baseT_Full |
2549                                                 ADVERTISED_10baseT_Full;
2550                                 else
2551                                         advertising |= ADVERTISED_10baseT_Full;
2552                         }
2553
2554                         phydev->advertising = advertising;
2555
2556                         phy_start_aneg(phydev);
2557
2558                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2559                         if (phyid != PHY_ID_BCMAC131) {
2560                                 phyid &= PHY_BCM_OUI_MASK;
2561                                 if (phyid == PHY_BCM_OUI_1 ||
2562                                     phyid == PHY_BCM_OUI_2 ||
2563                                     phyid == PHY_BCM_OUI_3)
2564                                         do_low_power = true;
2565                         }
2566                 }
2567         } else {
2568                 do_low_power = true;
2569
2570                 if (tp->link_config.phy_is_low_power == 0) {
2571                         tp->link_config.phy_is_low_power = 1;
2572                         tp->link_config.orig_speed = tp->link_config.speed;
2573                         tp->link_config.orig_duplex = tp->link_config.duplex;
2574                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2575                 }
2576
2577                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2578                         tp->link_config.speed = SPEED_10;
2579                         tp->link_config.duplex = DUPLEX_HALF;
2580                         tp->link_config.autoneg = AUTONEG_ENABLE;
2581                         tg3_setup_phy(tp, 0);
2582                 }
2583         }
2584
2585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2586                 u32 val;
2587
2588                 val = tr32(GRC_VCPU_EXT_CTRL);
2589                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2590         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2591                 int i;
2592                 u32 val;
2593
2594                 for (i = 0; i < 200; i++) {
2595                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2596                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2597                                 break;
2598                         msleep(1);
2599                 }
2600         }
2601         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2602                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2603                                                      WOL_DRV_STATE_SHUTDOWN |
2604                                                      WOL_DRV_WOL |
2605                                                      WOL_SET_MAGIC_PKT);
2606
2607         if (device_should_wake) {
2608                 u32 mac_mode;
2609
2610                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2611                         if (do_low_power) {
2612                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2613                                 udelay(40);
2614                         }
2615
2616                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2617                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2618                         else
2619                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2620
2621                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2622                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2623                             ASIC_REV_5700) {
2624                                 u32 speed = (tp->tg3_flags &
2625                                              TG3_FLAG_WOL_SPEED_100MB) ?
2626                                              SPEED_100 : SPEED_10;
2627                                 if (tg3_5700_link_polarity(tp, speed))
2628                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2629                                 else
2630                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2631                         }
2632                 } else {
2633                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2634                 }
2635
2636                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2637                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2638
2639                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2640                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2641                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2642                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2643                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2644                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2645
2646                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2647                         mac_mode |= tp->mac_mode &
2648                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2649                         if (mac_mode & MAC_MODE_APE_TX_EN)
2650                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2651                 }
2652
2653                 tw32_f(MAC_MODE, mac_mode);
2654                 udelay(100);
2655
2656                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2657                 udelay(10);
2658         }
2659
2660         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2661             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2662              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2663                 u32 base_val;
2664
2665                 base_val = tp->pci_clock_ctrl;
2666                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2667                              CLOCK_CTRL_TXCLK_DISABLE);
2668
2669                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2670                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2671         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2672                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2673                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2674                 /* do nothing */
2675         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2676                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2677                 u32 newbits1, newbits2;
2678
2679                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2680                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2681                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2682                                     CLOCK_CTRL_TXCLK_DISABLE |
2683                                     CLOCK_CTRL_ALTCLK);
2684                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2685                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2686                         newbits1 = CLOCK_CTRL_625_CORE;
2687                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2688                 } else {
2689                         newbits1 = CLOCK_CTRL_ALTCLK;
2690                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2691                 }
2692
2693                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2694                             40);
2695
2696                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2697                             40);
2698
2699                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2700                         u32 newbits3;
2701
2702                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2703                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2704                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2705                                             CLOCK_CTRL_TXCLK_DISABLE |
2706                                             CLOCK_CTRL_44MHZ_CORE);
2707                         } else {
2708                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2709                         }
2710
2711                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2712                                     tp->pci_clock_ctrl | newbits3, 40);
2713                 }
2714         }
2715
2716         if (!device_should_wake &&
2717             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2718                 tg3_power_down_phy(tp, do_low_power);
2719
2720         tg3_frob_aux_power(tp);
2721
2722         /* Workaround for unstable PLL clock */
2723         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2724             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2725                 u32 val = tr32(0x7d00);
2726
2727                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2728                 tw32(0x7d00, val);
2729                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2730                         int err;
2731
2732                         err = tg3_nvram_lock(tp);
2733                         tg3_halt_cpu(tp, RX_CPU_BASE);
2734                         if (!err)
2735                                 tg3_nvram_unlock(tp);
2736                 }
2737         }
2738
2739         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2740
2741         if (device_should_wake)
2742                 pci_enable_wake(tp->pdev, state, true);
2743
2744         /* Finally, set the new power state. */
2745         pci_set_power_state(tp->pdev, state);
2746
2747         return 0;
2748 }
2749
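/* Decode the PHY aux-status register into speed and duplex.  FET-style
 * PHYs report 100 Mb/s and full duplex with dedicated bits instead of
 * the speed field, so they are handled in the default case.
 */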
2750 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                              u16 *speed, u8 *duplex)
2751 {
2752         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2753         case MII_TG3_AUX_STAT_10HALF:
2754                 *speed = SPEED_10;
2755                 *duplex = DUPLEX_HALF;
2756                 break;
2757
2758         case MII_TG3_AUX_STAT_10FULL:
2759                 *speed = SPEED_10;
2760                 *duplex = DUPLEX_FULL;
2761                 break;
2762
2763         case MII_TG3_AUX_STAT_100HALF:
2764                 *speed = SPEED_100;
2765                 *duplex = DUPLEX_HALF;
2766                 break;
2767
2768         case MII_TG3_AUX_STAT_100FULL:
2769                 *speed = SPEED_100;
2770                 *duplex = DUPLEX_FULL;
2771                 break;
2772
2773         case MII_TG3_AUX_STAT_1000HALF:
2774                 *speed = SPEED_1000;
2775                 *duplex = DUPLEX_HALF;
2776                 break;
2777
2778         case MII_TG3_AUX_STAT_1000FULL:
2779                 *speed = SPEED_1000;
2780                 *duplex = DUPLEX_FULL;
2781                 break;
2782
2783         default:
2784                 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2785                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2786                                  SPEED_10;
2787                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2788                                   DUPLEX_HALF;
2789                         break;
2790                 }
2791                 *speed = SPEED_INVALID;
2792                 *duplex = DUPLEX_INVALID;
2793                 break;
2794         }
2795 }
2796
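/* Start link negotiation on a copper PHY: build the MII advertisement
 * from link_config for autoneg, or force the requested speed/duplex
 * when a fixed mode was configured.
 */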
2797 static void tg3_phy_copper_begin(struct tg3 *tp)
2798 {
2799         u32 new_adv;
2800         int i;
2801
2802         if (tp->link_config.phy_is_low_power) {
2803                 /* Entering low power mode.  Disable gigabit and
2804                  * 100baseT advertisements.
2805                  */
2806                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2807
2808                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2809                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2810                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2811                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2812
2813                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2814         } else if (tp->link_config.speed == SPEED_INVALID) {
2815                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2816                         tp->link_config.advertising &=
2817                                 ~(ADVERTISED_1000baseT_Half |
2818                                   ADVERTISED_1000baseT_Full);
2819
2820                 new_adv = ADVERTISE_CSMA;
2821                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2822                         new_adv |= ADVERTISE_10HALF;
2823                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2824                         new_adv |= ADVERTISE_10FULL;
2825                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2826                         new_adv |= ADVERTISE_100HALF;
2827                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2828                         new_adv |= ADVERTISE_100FULL;
2829
2830                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2831
2832                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2833
2834                 if (tp->link_config.advertising &
2835                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2836                         new_adv = 0;
2837                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2838                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2839                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2840                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2841                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2842                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2843                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2844                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2845                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2846                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2847                 } else {
2848                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2849                 }
2850         } else {
2851                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2852                 new_adv |= ADVERTISE_CSMA;
2853
2854                 /* Asking for a specific link mode. */
2855                 if (tp->link_config.speed == SPEED_1000) {
2856                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2857
2858                         if (tp->link_config.duplex == DUPLEX_FULL)
2859                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2860                         else
2861                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2862                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2863                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2864                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2865                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2866                 } else {
2867                         if (tp->link_config.speed == SPEED_100) {
2868                                 if (tp->link_config.duplex == DUPLEX_FULL)
2869                                         new_adv |= ADVERTISE_100FULL;
2870                                 else
2871                                         new_adv |= ADVERTISE_100HALF;
2872                         } else {
2873                                 if (tp->link_config.duplex == DUPLEX_FULL)
2874                                         new_adv |= ADVERTISE_10FULL;
2875                                 else
2876                                         new_adv |= ADVERTISE_10HALF;
2877                         }
2878                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2879
2880                         new_adv = 0;
2881                 }
2882
2883                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2884         }
2885
2886         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2887             tp->link_config.speed != SPEED_INVALID) {
2888                 u32 bmcr, orig_bmcr;
2889
2890                 tp->link_config.active_speed = tp->link_config.speed;
2891                 tp->link_config.active_duplex = tp->link_config.duplex;
2892
2893                 bmcr = 0;
2894                 switch (tp->link_config.speed) {
2895                 default:
2896                 case SPEED_10:
2897                         break;
2898
2899                 case SPEED_100:
2900                         bmcr |= BMCR_SPEED100;
2901                         break;
2902
2903                 case SPEED_1000:
2904                         bmcr |= TG3_BMCR_SPEED1000;
2905                         break;
2906                 }
2907
2908                 if (tp->link_config.duplex == DUPLEX_FULL)
2909                         bmcr |= BMCR_FULLDPLX;
2910
2911                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2912                     (bmcr != orig_bmcr)) {
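                        /* Briefly force loopback so the link drops and
                         * BMSR reports link-down before programming the
                         * new forced mode.
                         */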
2913                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2914                         for (i = 0; i < 1500; i++) {
2915                                 u32 tmp;
2916
2917                                 udelay(10);
2918                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2919                                     tg3_readphy(tp, MII_BMSR, &tmp))
2920                                         continue;
2921                                 if (!(tmp & BMSR_LSTATUS)) {
2922                                         udelay(40);
2923                                         break;
2924                                 }
2925                         }
2926                         tg3_writephy(tp, MII_BMCR, bmcr);
2927                         udelay(40);
2928                 }
2929         } else {
2930                 tg3_writephy(tp, MII_BMCR,
2931                              BMCR_ANENABLE | BMCR_ANRESTART);
2932         }
2933 }
2934
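/* Load DSP fixups into the BCM5401 PHY through the DSP address and
 * read/write ports.  The register values appear to be vendor-specified
 * magic numbers and are not otherwise documented here.
 */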
2935 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2936 {
2937         int err;
2938
2939         /* Turn off tap power management and set the extended
2940          * packet length bit. */
2941         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2942
2943         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2944         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2945
2946         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2947         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2948
2949         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2950         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2951
2952         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2953         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2954
2955         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2956         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2957
2958         udelay(40);
2959
2960         return err;
2961 }
2962
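/* Return 1 if the PHY is currently advertising every mode requested
 * in @mask, 0 otherwise (including when the advertisement registers
 * cannot be read).
 */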
2963 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2964 {
2965         u32 adv_reg, all_mask = 0;
2966
2967         if (mask & ADVERTISED_10baseT_Half)
2968                 all_mask |= ADVERTISE_10HALF;
2969         if (mask & ADVERTISED_10baseT_Full)
2970                 all_mask |= ADVERTISE_10FULL;
2971         if (mask & ADVERTISED_100baseT_Half)
2972                 all_mask |= ADVERTISE_100HALF;
2973         if (mask & ADVERTISED_100baseT_Full)
2974                 all_mask |= ADVERTISE_100FULL;
2975
2976         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2977                 return 0;
2978
2979         if ((adv_reg & all_mask) != all_mask)
2980                 return 0;
2981         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2982                 u32 tg3_ctrl;
2983
2984                 all_mask = 0;
2985                 if (mask & ADVERTISED_1000baseT_Half)
2986                         all_mask |= ADVERTISE_1000HALF;
2987                 if (mask & ADVERTISED_1000baseT_Full)
2988                         all_mask |= ADVERTISE_1000FULL;
2989
2990                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2991                         return 0;
2992
2993                 if ((tg3_ctrl & all_mask) != all_mask)
2994                         return 0;
2995         }
2996         return 1;
2997 }
2998
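/* Verify that the advertised pause bits match the requested flow
 * control mode.  Returns 0 only when a full-duplex link is up with a
 * stale advertisement (forcing renegotiation); otherwise fixes up the
 * advertisement register as needed and returns 1.
 */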
2999 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3000 {
3001         u32 curadv, reqadv;
3002
3003         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3004                 return 1;
3005
3006         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3007         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3008
3009         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3010                 if (curadv != reqadv)
3011                         return 0;
3012
3013                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3014                         tg3_readphy(tp, MII_LPA, rmtadv);
3015         } else {
3016                 /* Reprogram the advertisement register, even if it
3017                  * does not affect the current link.  If the link
3018                  * gets renegotiated in the future, we can save an
3019                  * additional renegotiation cycle by advertising
3020                  * it correctly in the first place.
3021                  */
3022                 if (curadv != reqadv) {
3023                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3024                                      ADVERTISE_PAUSE_ASYM);
3025                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3026                 }
3027         }
3028
3029         return 1;
3030 }
3031
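/* Bring up, or renegotiate, the copper link and program the MAC port
 * mode, duplex and flow control to match the resulting PHY state.
 */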
3032 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3033 {
3034         int current_link_up;
3035         u32 bmsr, dummy;
3036         u32 lcl_adv, rmt_adv;
3037         u16 current_speed;
3038         u8 current_duplex;
3039         int i, err;
3040
3041         tw32(MAC_EVENT, 0);
3042
3043         tw32_f(MAC_STATUS,
3044              (MAC_STATUS_SYNC_CHANGED |
3045               MAC_STATUS_CFG_CHANGED |
3046               MAC_STATUS_MI_COMPLETION |
3047               MAC_STATUS_LNKSTATE_CHANGED));
3048         udelay(40);
3049
3050         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3051                 tw32_f(MAC_MI_MODE,
3052                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3053                 udelay(80);
3054         }
3055
3056         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3057
3058         /* Some third-party PHYs need to be reset on link going
3059          * down.
3060          */
3061         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3062              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3063              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3064             netif_carrier_ok(tp->dev)) {
3065                 tg3_readphy(tp, MII_BMSR, &bmsr);
3066                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3067                     !(bmsr & BMSR_LSTATUS))
3068                         force_reset = 1;
3069         }
3070         if (force_reset)
3071                 tg3_phy_reset(tp);
3072
3073         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3074                 tg3_readphy(tp, MII_BMSR, &bmsr);
3075                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3076                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3077                         bmsr = 0;
3078
3079                 if (!(bmsr & BMSR_LSTATUS)) {
3080                         err = tg3_init_5401phy_dsp(tp);
3081                         if (err)
3082                                 return err;
3083
3084                         tg3_readphy(tp, MII_BMSR, &bmsr);
3085                         for (i = 0; i < 1000; i++) {
3086                                 udelay(10);
3087                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3088                                     (bmsr & BMSR_LSTATUS)) {
3089                                         udelay(40);
3090                                         break;
3091                                 }
3092                         }
3093
3094                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3095                             TG3_PHY_REV_BCM5401_B0 &&
3096                             !(bmsr & BMSR_LSTATUS) &&
3097                             tp->link_config.active_speed == SPEED_1000) {
3098                                 err = tg3_phy_reset(tp);
3099                                 if (!err)
3100                                         err = tg3_init_5401phy_dsp(tp);
3101                                 if (err)
3102                                         return err;
3103                         }
3104                 }
3105         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3106                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3107                 /* 5701 {A0,B0} CRC bug workaround */
3108                 tg3_writephy(tp, 0x15, 0x0a75);
3109                 tg3_writephy(tp, 0x1c, 0x8c68);
3110                 tg3_writephy(tp, 0x1c, 0x8d68);
3111                 tg3_writephy(tp, 0x1c, 0x8c68);
3112         }
3113
3114         /* Clear pending interrupts... */
3115         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3116         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3117
3118         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3119                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3120         else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3121                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3122
3123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3125                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3126                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3127                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3128                 else
3129                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3130         }
3131
3132         current_link_up = 0;
3133         current_speed = SPEED_INVALID;
3134         current_duplex = DUPLEX_INVALID;
3135
3136         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3137                 u32 val;
3138
3139                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3140                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3141                 if (!(val & (1 << 10))) {
3142                         val |= (1 << 10);
3143                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3144                         goto relink;
3145                 }
3146         }
3147
3148         bmsr = 0;
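        /* BMSR latches link-down events, so read it twice to get the
         * current link state.
         */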
3149         for (i = 0; i < 100; i++) {
3150                 tg3_readphy(tp, MII_BMSR, &bmsr);
3151                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3152                     (bmsr & BMSR_LSTATUS))
3153                         break;
3154                 udelay(40);
3155         }
3156
3157         if (bmsr & BMSR_LSTATUS) {
3158                 u32 aux_stat, bmcr;
3159
3160                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3161                 for (i = 0; i < 2000; i++) {
3162                         udelay(10);
3163                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3164                             aux_stat)
3165                                 break;
3166                 }
3167
3168                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3169                                              &current_speed,
3170                                              &current_duplex);
3171
3172                 bmcr = 0;
3173                 for (i = 0; i < 200; i++) {
3174                         tg3_readphy(tp, MII_BMCR, &bmcr);
3175                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3176                                 continue;
3177                         if (bmcr && bmcr != 0x7fff)
3178                                 break;
3179                         udelay(10);
3180                 }
3181
3182                 lcl_adv = 0;
3183                 rmt_adv = 0;
3184
3185                 tp->link_config.active_speed = current_speed;
3186                 tp->link_config.active_duplex = current_duplex;
3187
3188                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3189                         if ((bmcr & BMCR_ANENABLE) &&
3190                             tg3_copper_is_advertising_all(tp,
3191                                                 tp->link_config.advertising)) {
3192                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3193                                                                   &rmt_adv))
3194                                         current_link_up = 1;
3195                         }
3196                 } else {
3197                         if (!(bmcr & BMCR_ANENABLE) &&
3198                             tp->link_config.speed == current_speed &&
3199                             tp->link_config.duplex == current_duplex &&
3200                             tp->link_config.flowctrl ==
3201                             tp->link_config.active_flowctrl) {
3202                                 current_link_up = 1;
3203                         }
3204                 }
3205
3206                 if (current_link_up == 1 &&
3207                     tp->link_config.active_duplex == DUPLEX_FULL)
3208                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3209         }
3210
3211 relink:
3212         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3213                 u32 tmp;
3214
3215                 tg3_phy_copper_begin(tp);
3216
3217                 tg3_readphy(tp, MII_BMSR, &tmp);
3218                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3219                     (tmp & BMSR_LSTATUS))
3220                         current_link_up = 1;
3221         }
3222
3223         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3224         if (current_link_up == 1) {
3225                 if (tp->link_config.active_speed == SPEED_100 ||
3226                     tp->link_config.active_speed == SPEED_10)
3227                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3228                 else
3229                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3230         } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3231                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3232         else
3233                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3234
3235         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3236         if (tp->link_config.active_duplex == DUPLEX_HALF)
3237                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3238
3239         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3240                 if (current_link_up == 1 &&
3241                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3242                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3243                 else
3244                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3245         }
3246
3247         /* ??? Without this setting Netgear GA302T PHY does not
3248          * ??? send/receive packets...
3249          */
3250         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3251             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3252                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3253                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3254                 udelay(80);
3255         }
3256
3257         tw32_f(MAC_MODE, tp->mac_mode);
3258         udelay(40);
3259
3260         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3261                 /* Polled via timer. */
3262                 tw32_f(MAC_EVENT, 0);
3263         } else {
3264                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3265         }
3266         udelay(40);
3267
3268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3269             current_link_up == 1 &&
3270             tp->link_config.active_speed == SPEED_1000 &&
3271             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3272              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3273                 udelay(120);
3274                 tw32_f(MAC_STATUS,
3275                      (MAC_STATUS_SYNC_CHANGED |
3276                       MAC_STATUS_CFG_CHANGED));
3277                 udelay(40);
3278                 tg3_write_mem(tp,
3279                               NIC_SRAM_FIRMWARE_MBOX,
3280                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3281         }
3282
3283         /* Prevent send BD corruption. */
3284         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3285                 u16 oldlnkctl, newlnkctl;
3286
3287                 pci_read_config_word(tp->pdev,
3288                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3289                                      &oldlnkctl);
3290                 if (tp->link_config.active_speed == SPEED_100 ||
3291                     tp->link_config.active_speed == SPEED_10)
3292                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3293                 else
3294                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3295                 if (newlnkctl != oldlnkctl)
3296                         pci_write_config_word(tp->pdev,
3297                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3298                                               newlnkctl);
3299         }
3300
3301         if (current_link_up != netif_carrier_ok(tp->dev)) {
3302                 if (current_link_up)
3303                         netif_carrier_on(tp->dev);
3304                 else
3305                         netif_carrier_off(tp->dev);
3306                 tg3_link_report(tp);
3307         }
3308
3309         return 0;
3310 }
3311
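/* Software state for what is essentially the IEEE 802.3z clause 37
 * autonegotiation state machine used on fiber links; see
 * tg3_fiber_aneg_smachine() below.  The MR_xxx flags mirror the
 * management variables named in the standard.
 */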
3312 struct tg3_fiber_aneginfo {
3313         int state;
3314 #define ANEG_STATE_UNKNOWN              0
3315 #define ANEG_STATE_AN_ENABLE            1
3316 #define ANEG_STATE_RESTART_INIT         2
3317 #define ANEG_STATE_RESTART              3
3318 #define ANEG_STATE_DISABLE_LINK_OK      4
3319 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3320 #define ANEG_STATE_ABILITY_DETECT       6
3321 #define ANEG_STATE_ACK_DETECT_INIT      7
3322 #define ANEG_STATE_ACK_DETECT           8
3323 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3324 #define ANEG_STATE_COMPLETE_ACK         10
3325 #define ANEG_STATE_IDLE_DETECT_INIT     11
3326 #define ANEG_STATE_IDLE_DETECT          12
3327 #define ANEG_STATE_LINK_OK              13
3328 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3329 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3330
3331         u32 flags;
3332 #define MR_AN_ENABLE            0x00000001
3333 #define MR_RESTART_AN           0x00000002
3334 #define MR_AN_COMPLETE          0x00000004
3335 #define MR_PAGE_RX              0x00000008
3336 #define MR_NP_LOADED            0x00000010
3337 #define MR_TOGGLE_TX            0x00000020
3338 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3339 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3340 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3341 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3342 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3343 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3344 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3345 #define MR_TOGGLE_RX            0x00002000
3346 #define MR_NP_RX                0x00004000
3347
3348 #define MR_LINK_OK              0x80000000
3349
3350         unsigned long link_time, cur_time;
3351
3352         u32 ability_match_cfg;
3353         int ability_match_count;
3354
3355         char ability_match, idle_match, ack_match;
3356
3357         u32 txconfig, rxconfig;
3358 #define ANEG_CFG_NP             0x00000080
3359 #define ANEG_CFG_ACK            0x00000040
3360 #define ANEG_CFG_RF2            0x00000020
3361 #define ANEG_CFG_RF1            0x00000010
3362 #define ANEG_CFG_PS2            0x00000001
3363 #define ANEG_CFG_PS1            0x00008000
3364 #define ANEG_CFG_HD             0x00004000
3365 #define ANEG_CFG_FD             0x00002000
3366 #define ANEG_CFG_INVAL          0x00001f06
3367
3368 };
3369 #define ANEG_OK         0
3370 #define ANEG_DONE       1
3371 #define ANEG_TIMER_ENAB 2
3372 #define ANEG_FAILED     -1
3373
3374 #define ANEG_STATE_SETTLE_TIME  10000
3375
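/* Advance the autonegotiation state machine by one step.  Called
 * repeatedly (roughly once per microsecond) from fiber_autoneg();
 * returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */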
3376 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3377                                    struct tg3_fiber_aneginfo *ap)
3378 {
3379         u16 flowctrl;
3380         unsigned long delta;
3381         u32 rx_cfg_reg;
3382         int ret;
3383
3384         if (ap->state == ANEG_STATE_UNKNOWN) {
3385                 ap->rxconfig = 0;
3386                 ap->link_time = 0;
3387                 ap->cur_time = 0;
3388                 ap->ability_match_cfg = 0;
3389                 ap->ability_match_count = 0;
3390                 ap->ability_match = 0;
3391                 ap->idle_match = 0;
3392                 ap->ack_match = 0;
3393         }
3394         ap->cur_time++;
3395
3396         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3397                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3398
3399                 if (rx_cfg_reg != ap->ability_match_cfg) {
3400                         ap->ability_match_cfg = rx_cfg_reg;
3401                         ap->ability_match = 0;
3402                         ap->ability_match_count = 0;
3403                 } else {
3404                         if (++ap->ability_match_count > 1) {
3405                                 ap->ability_match = 1;
3406                                 ap->ability_match_cfg = rx_cfg_reg;
3407                         }
3408                 }
3409                 if (rx_cfg_reg & ANEG_CFG_ACK)
3410                         ap->ack_match = 1;
3411                 else
3412                         ap->ack_match = 0;
3413
3414                 ap->idle_match = 0;
3415         } else {
3416                 ap->idle_match = 1;
3417                 ap->ability_match_cfg = 0;
3418                 ap->ability_match_count = 0;
3419                 ap->ability_match = 0;
3420                 ap->ack_match = 0;
3421
3422                 rx_cfg_reg = 0;
3423         }
3424
3425         ap->rxconfig = rx_cfg_reg;
3426         ret = ANEG_OK;
3427
3428         switch (ap->state) {
3429         case ANEG_STATE_UNKNOWN:
3430                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3431                         ap->state = ANEG_STATE_AN_ENABLE;
3432
3433                 /* fallthru */
3434         case ANEG_STATE_AN_ENABLE:
3435                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3436                 if (ap->flags & MR_AN_ENABLE) {
3437                         ap->link_time = 0;
3438                         ap->cur_time = 0;
3439                         ap->ability_match_cfg = 0;
3440                         ap->ability_match_count = 0;
3441                         ap->ability_match = 0;
3442                         ap->idle_match = 0;
3443                         ap->ack_match = 0;
3444
3445                         ap->state = ANEG_STATE_RESTART_INIT;
3446                 } else {
3447                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3448                 }
3449                 break;
3450
3451         case ANEG_STATE_RESTART_INIT:
3452                 ap->link_time = ap->cur_time;
3453                 ap->flags &= ~(MR_NP_LOADED);
3454                 ap->txconfig = 0;
3455                 tw32(MAC_TX_AUTO_NEG, 0);
3456                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3457                 tw32_f(MAC_MODE, tp->mac_mode);
3458                 udelay(40);
3459
3460                 ret = ANEG_TIMER_ENAB;
3461                 ap->state = ANEG_STATE_RESTART;
3462
3463                 /* fallthru */
3464         case ANEG_STATE_RESTART:
3465                 delta = ap->cur_time - ap->link_time;
3466                 if (delta > ANEG_STATE_SETTLE_TIME) {
3467                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3468                 } else {
3469                         ret = ANEG_TIMER_ENAB;
3470                 }
3471                 break;
3472
3473         case ANEG_STATE_DISABLE_LINK_OK:
3474                 ret = ANEG_DONE;
3475                 break;
3476
3477         case ANEG_STATE_ABILITY_DETECT_INIT:
3478                 ap->flags &= ~(MR_TOGGLE_TX);
3479                 ap->txconfig = ANEG_CFG_FD;
3480                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3481                 if (flowctrl & ADVERTISE_1000XPAUSE)
3482                         ap->txconfig |= ANEG_CFG_PS1;
3483                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3484                         ap->txconfig |= ANEG_CFG_PS2;
3485                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3486                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3487                 tw32_f(MAC_MODE, tp->mac_mode);
3488                 udelay(40);
3489
3490                 ap->state = ANEG_STATE_ABILITY_DETECT;
3491                 break;
3492
3493         case ANEG_STATE_ABILITY_DETECT:
3494                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3495                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3497                 break;
3498
3499         case ANEG_STATE_ACK_DETECT_INIT:
3500                 ap->txconfig |= ANEG_CFG_ACK;
3501                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3502                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3503                 tw32_f(MAC_MODE, tp->mac_mode);
3504                 udelay(40);
3505
3506                 ap->state = ANEG_STATE_ACK_DETECT;
3507
3508                 /* fallthru */
3509         case ANEG_STATE_ACK_DETECT:
3510                 if (ap->ack_match != 0) {
3511                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3512                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3513                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3514                         } else {
3515                                 ap->state = ANEG_STATE_AN_ENABLE;
3516                         }
3517                 } else if (ap->ability_match != 0 &&
3518                            ap->rxconfig == 0) {
3519                         ap->state = ANEG_STATE_AN_ENABLE;
3520                 }
3521                 break;
3522
3523         case ANEG_STATE_COMPLETE_ACK_INIT:
3524                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3525                         ret = ANEG_FAILED;
3526                         break;
3527                 }
3528                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3529                                MR_LP_ADV_HALF_DUPLEX |
3530                                MR_LP_ADV_SYM_PAUSE |
3531                                MR_LP_ADV_ASYM_PAUSE |
3532                                MR_LP_ADV_REMOTE_FAULT1 |
3533                                MR_LP_ADV_REMOTE_FAULT2 |
3534                                MR_LP_ADV_NEXT_PAGE |
3535                                MR_TOGGLE_RX |
3536                                MR_NP_RX);
3537                 if (ap->rxconfig & ANEG_CFG_FD)
3538                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3539                 if (ap->rxconfig & ANEG_CFG_HD)
3540                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3541                 if (ap->rxconfig & ANEG_CFG_PS1)
3542                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3543                 if (ap->rxconfig & ANEG_CFG_PS2)
3544                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3545                 if (ap->rxconfig & ANEG_CFG_RF1)
3546                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3547                 if (ap->rxconfig & ANEG_CFG_RF2)
3548                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3549                 if (ap->rxconfig & ANEG_CFG_NP)
3550                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3551
3552                 ap->link_time = ap->cur_time;
3553
3554                 ap->flags ^= (MR_TOGGLE_TX);
3555                 if (ap->rxconfig & 0x0008)
3556                         ap->flags |= MR_TOGGLE_RX;
3557                 if (ap->rxconfig & ANEG_CFG_NP)
3558                         ap->flags |= MR_NP_RX;
3559                 ap->flags |= MR_PAGE_RX;
3560
3561                 ap->state = ANEG_STATE_COMPLETE_ACK;
3562                 ret = ANEG_TIMER_ENAB;
3563                 break;
3564
3565         case ANEG_STATE_COMPLETE_ACK:
3566                 if (ap->ability_match != 0 &&
3567                     ap->rxconfig == 0) {
3568                         ap->state = ANEG_STATE_AN_ENABLE;
3569                         break;
3570                 }
3571                 delta = ap->cur_time - ap->link_time;
3572                 if (delta > ANEG_STATE_SETTLE_TIME) {
3573                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3574                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3575                         } else {
3576                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3577                                     !(ap->flags & MR_NP_RX)) {
3578                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3579                                 } else {
3580                                         ret = ANEG_FAILED;
3581                                 }
3582                         }
3583                 }
3584                 break;
3585
3586         case ANEG_STATE_IDLE_DETECT_INIT:
3587                 ap->link_time = ap->cur_time;
3588                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3589                 tw32_f(MAC_MODE, tp->mac_mode);
3590                 udelay(40);
3591
3592                 ap->state = ANEG_STATE_IDLE_DETECT;
3593                 ret = ANEG_TIMER_ENAB;
3594                 break;
3595
3596         case ANEG_STATE_IDLE_DETECT:
3597                 if (ap->ability_match != 0 &&
3598                     ap->rxconfig == 0) {
3599                         ap->state = ANEG_STATE_AN_ENABLE;
3600                         break;
3601                 }
3602                 delta = ap->cur_time - ap->link_time;
3603                 if (delta > ANEG_STATE_SETTLE_TIME) {
3604                         /* XXX another gem from the Broadcom driver :( */
3605                         ap->state = ANEG_STATE_LINK_OK;
3606                 }
3607                 break;
3608
3609         case ANEG_STATE_LINK_OK:
3610                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3611                 ret = ANEG_DONE;
3612                 break;
3613
3614         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3615                 /* ??? unimplemented */
3616                 break;
3617
3618         case ANEG_STATE_NEXT_PAGE_WAIT:
3619                 /* ??? unimplemented */
3620                 break;
3621
3622         default:
3623                 ret = ANEG_FAILED;
3624                 break;
3625         }
3626
3627         return ret;
3628 }
3629
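/* Run the software autonegotiation state machine, polling about once
 * per microsecond for up to ~195 ms.  Returns 1 when negotiation
 * completes; the transmitted config word is returned in *txflags and
 * the MR_xxx result flags in *rxflags.
 */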
3630 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3631 {
3632         int res = 0;
3633         struct tg3_fiber_aneginfo aninfo;
3634         int status = ANEG_FAILED;
3635         unsigned int tick;
3636         u32 tmp;
3637
3638         tw32_f(MAC_TX_AUTO_NEG, 0);
3639
3640         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3641         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3642         udelay(40);
3643
3644         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3645         udelay(40);
3646
3647         memset(&aninfo, 0, sizeof(aninfo));
3648         aninfo.flags |= MR_AN_ENABLE;
3649         aninfo.state = ANEG_STATE_UNKNOWN;
3650         aninfo.cur_time = 0;
3651         tick = 0;
3652         while (++tick < 195000) {
3653                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3654                 if (status == ANEG_DONE || status == ANEG_FAILED)
3655                         break;
3656
3657                 udelay(1);
3658         }
3659
3660         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3661         tw32_f(MAC_MODE, tp->mac_mode);
3662         udelay(40);
3663
3664         *txflags = aninfo.txconfig;
3665         *rxflags = aninfo.flags;
3666
3667         if (status == ANEG_DONE &&
3668             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3669                              MR_LP_ADV_FULL_DUPLEX)))
3670                 res = 1;
3671
3672         return res;
3673 }
3674
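/* One-time initialization of the BCM8002 SERDES PHY.  The register
 * writes below appear to be vendor-specified magic values.
 */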
3675 static void tg3_init_bcm8002(struct tg3 *tp)
3676 {
3677         u32 mac_status = tr32(MAC_STATUS);
3678         int i;
3679
3680         /* Reset when initializing for the first time or when we have a link. */
3681         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3682             !(mac_status & MAC_STATUS_PCS_SYNCED))
3683                 return;
3684
3685         /* Set PLL lock range. */
3686         tg3_writephy(tp, 0x16, 0x8007);
3687
3688         /* SW reset */
3689         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3690
3691         /* Wait for reset to complete. */
3692         /* XXX schedule_timeout() ... */
3693         for (i = 0; i < 500; i++)
3694                 udelay(10);
3695
3696         /* Config mode; select PMA/Ch 1 regs. */
3697         tg3_writephy(tp, 0x10, 0x8411);
3698
3699         /* Enable auto-lock and comdet, select txclk for tx. */
3700         tg3_writephy(tp, 0x11, 0x0a10);
3701
3702         tg3_writephy(tp, 0x18, 0x00a0);
3703         tg3_writephy(tp, 0x16, 0x41ff);
3704
3705         /* Assert and deassert POR. */
3706         tg3_writephy(tp, 0x13, 0x0400);
3707         udelay(40);
3708         tg3_writephy(tp, 0x13, 0x0000);
3709
3710         tg3_writephy(tp, 0x11, 0x0a50);
3711         udelay(40);
3712         tg3_writephy(tp, 0x11, 0x0a10);
3713
3714         /* Wait for signal to stabilize */
3715         /* XXX schedule_timeout() ... */
3716         for (i = 0; i < 15000; i++)
3717                 udelay(10);
3718
3719         /* Deselect the channel register so we can read the PHYID
3720          * later.
3721          */
3722         tg3_writephy(tp, 0x10, 0x8011);
3723 }
3724
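/* Fiber link setup using the hardware SG_DIG autonegotiation block,
 * with fallback to parallel detection when the partner does not send
 * config code words.  Returns nonzero when the link is up.
 */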
3725 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3726 {
3727         u16 flowctrl;
3728         u32 sg_dig_ctrl, sg_dig_status;
3729         u32 serdes_cfg, expected_sg_dig_ctrl;
3730         int workaround, port_a;
3731         int current_link_up;
3732
3733         serdes_cfg = 0;
3734         expected_sg_dig_ctrl = 0;
3735         workaround = 0;
3736         port_a = 1;
3737         current_link_up = 0;
3738
3739         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3740             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3741                 workaround = 1;
3742                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3743                         port_a = 0;
3744
3745                 /* Preserve bits 0-11, 13, 14 (signal pre-emphasis)
3746                  * and bits 20-23 (voltage regulator). */
3747                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3748         }
3749
3750         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3751
3752         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3753                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3754                         if (workaround) {
3755                                 u32 val = serdes_cfg;
3756
3757                                 if (port_a)
3758                                         val |= 0xc010000;
3759                                 else
3760                                         val |= 0x4010000;
3761                                 tw32_f(MAC_SERDES_CFG, val);
3762                         }
3763
3764                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3765                 }
3766                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3767                         tg3_setup_flow_control(tp, 0, 0);
3768                         current_link_up = 1;
3769                 }
3770                 goto out;
3771         }
3772
3773         /* Want auto-negotiation. */
3774         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3775
3776         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3777         if (flowctrl & ADVERTISE_1000XPAUSE)
3778                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3779         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3780                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3781
3782         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3783                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3784                     tp->serdes_counter &&
3785                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3786                                     MAC_STATUS_RCVD_CFG)) ==
3787                      MAC_STATUS_PCS_SYNCED)) {
3788                         tp->serdes_counter--;
3789                         current_link_up = 1;
3790                         goto out;
3791                 }
3792 restart_autoneg:
3793                 if (workaround)
3794                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3795                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3796                 udelay(5);
3797                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3798
3799                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3800                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3801         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3802                                  MAC_STATUS_SIGNAL_DET)) {
3803                 sg_dig_status = tr32(SG_DIG_STATUS);
3804                 mac_status = tr32(MAC_STATUS);
3805
3806                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3807                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3808                         u32 local_adv = 0, remote_adv = 0;
3809
3810                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3811                                 local_adv |= ADVERTISE_1000XPAUSE;
3812                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3813                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3814
3815                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3816                                 remote_adv |= LPA_1000XPAUSE;
3817                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3818                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3819
3820                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3821                         current_link_up = 1;
3822                         tp->serdes_counter = 0;
3823                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3825                         if (tp->serdes_counter)
3826                                 tp->serdes_counter--;
3827                         else {
3828                                 if (workaround) {
3829                                         u32 val = serdes_cfg;
3830
3831                                         if (port_a)
3832                                                 val |= 0xc010000;
3833                                         else
3834                                                 val |= 0x4010000;
3835
3836                                         tw32_f(MAC_SERDES_CFG, val);
3837                                 }
3838
3839                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3840                                 udelay(40);
3841
3842                                 /* Link parallel detection: link is up only
3843                                  * if we have PCS_SYNC and are not receiving
3844                                  * config code words. */
3845                                 mac_status = tr32(MAC_STATUS);
3846                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3847                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3848                                         tg3_setup_flow_control(tp, 0, 0);
3849                                         current_link_up = 1;
3850                                         tp->tg3_flags2 |=
3851                                                 TG3_FLG2_PARALLEL_DETECT;
3852                                         tp->serdes_counter =
3853                                                 SERDES_PARALLEL_DET_TIMEOUT;
3854                                 } else
3855                                         goto restart_autoneg;
3856                         }
3857                 }
3858         } else {
3859                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3860                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3861         }
3862
3863 out:
3864         return current_link_up;
3865 }
3866
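/* Fiber link setup without hardware assist: run the software state
 * machine via fiber_autoneg(), or force a 1000FD link when
 * autonegotiation is disabled.  Returns nonzero when the link is up.
 */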
3867 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3868 {
3869         int current_link_up = 0;
3870
3871         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3872                 goto out;
3873
3874         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3875                 u32 txflags, rxflags;
3876                 int i;
3877
3878                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3879                         u32 local_adv = 0, remote_adv = 0;
3880
3881                         if (txflags & ANEG_CFG_PS1)
3882                                 local_adv |= ADVERTISE_1000XPAUSE;
3883                         if (txflags & ANEG_CFG_PS2)
3884                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3885
3886                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3887                                 remote_adv |= LPA_1000XPAUSE;
3888                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3889                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3890
3891                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3892
3893                         current_link_up = 1;
3894                 }
3895                 for (i = 0; i < 30; i++) {
3896                         udelay(20);
3897                         tw32_f(MAC_STATUS,
3898                                (MAC_STATUS_SYNC_CHANGED |
3899                                 MAC_STATUS_CFG_CHANGED));
3900                         udelay(40);
3901                         if ((tr32(MAC_STATUS) &
3902                              (MAC_STATUS_SYNC_CHANGED |
3903                               MAC_STATUS_CFG_CHANGED)) == 0)
3904                                 break;
3905                 }
3906
3907                 mac_status = tr32(MAC_STATUS);
3908                 if (current_link_up == 0 &&
3909                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3910                     !(mac_status & MAC_STATUS_RCVD_CFG))
3911                         current_link_up = 1;
3912         } else {
3913                 tg3_setup_flow_control(tp, 0, 0);
3914
3915                 /* Forcing 1000FD link up. */
3916                 current_link_up = 1;
3917
3918                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3919                 udelay(40);
3920
3921                 tw32_f(MAC_MODE, tp->mac_mode);
3922                 udelay(40);
3923         }
3924
3925 out:
3926         return current_link_up;
3927 }
3928
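/* Top-level link setup for TBI (fiber) ports: choose hardware or
 * software autonegotiation, then update the carrier state and LEDs
 * accordingly.
 */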
3929 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3930 {
3931         u32 orig_pause_cfg;
3932         u16 orig_active_speed;
3933         u8 orig_active_duplex;
3934         u32 mac_status;
3935         int current_link_up;
3936         int i;
3937
3938         orig_pause_cfg = tp->link_config.active_flowctrl;
3939         orig_active_speed = tp->link_config.active_speed;
3940         orig_active_duplex = tp->link_config.active_duplex;
3941
3942         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3943             netif_carrier_ok(tp->dev) &&
3944             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3945                 mac_status = tr32(MAC_STATUS);
3946                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3947                                MAC_STATUS_SIGNAL_DET |
3948                                MAC_STATUS_CFG_CHANGED |
3949                                MAC_STATUS_RCVD_CFG);
3950                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3951                                    MAC_STATUS_SIGNAL_DET)) {
3952                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3953                                             MAC_STATUS_CFG_CHANGED));
3954                         return 0;
3955                 }
3956         }
3957
3958         tw32_f(MAC_TX_AUTO_NEG, 0);
3959
3960         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3961         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3962         tw32_f(MAC_MODE, tp->mac_mode);
3963         udelay(40);
3964
3965         if (tp->phy_id == TG3_PHY_ID_BCM8002)
3966                 tg3_init_bcm8002(tp);
3967
3968         /* Enable link change event even when serdes polling.  */
3969         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3970         udelay(40);
3971
3972         current_link_up = 0;
3973         mac_status = tr32(MAC_STATUS);
3974
3975         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3976                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3977         else
3978                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3979
3980         tp->napi[0].hw_status->status =
3981                 (SD_STATUS_UPDATED |
3982                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3983
3984         for (i = 0; i < 100; i++) {
3985                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3986                                     MAC_STATUS_CFG_CHANGED));
3987                 udelay(5);
3988                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3989                                          MAC_STATUS_CFG_CHANGED |
3990                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3991                         break;
3992         }
3993
3994         mac_status = tr32(MAC_STATUS);
3995         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3996                 current_link_up = 0;
3997                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3998                     tp->serdes_counter == 0) {
3999                         tw32_f(MAC_MODE, (tp->mac_mode |
4000                                           MAC_MODE_SEND_CONFIGS));
4001                         udelay(1);
4002                         tw32_f(MAC_MODE, tp->mac_mode);
4003                 }
4004         }
4005
4006         if (current_link_up == 1) {
4007                 tp->link_config.active_speed = SPEED_1000;
4008                 tp->link_config.active_duplex = DUPLEX_FULL;
4009                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4010                                     LED_CTRL_LNKLED_OVERRIDE |
4011                                     LED_CTRL_1000MBPS_ON));
4012         } else {
4013                 tp->link_config.active_speed = SPEED_INVALID;
4014                 tp->link_config.active_duplex = DUPLEX_INVALID;
4015                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4016                                     LED_CTRL_LNKLED_OVERRIDE |
4017                                     LED_CTRL_TRAFFIC_OVERRIDE));
4018         }
4019
4020         if (current_link_up != netif_carrier_ok(tp->dev)) {
4021                 if (current_link_up)
4022                         netif_carrier_on(tp->dev);
4023                 else
4024                         netif_carrier_off(tp->dev);
4025                 tg3_link_report(tp);
4026         } else {
4027                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4028                 if (orig_pause_cfg != now_pause_cfg ||
4029                     orig_active_speed != tp->link_config.active_speed ||
4030                     orig_active_duplex != tp->link_config.active_duplex)
4031                         tg3_link_report(tp);
4032         }
4033
4034         return 0;
4035 }
4036
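/* Link setup for SERDES devices that expose an MII-style register
 * interface (e.g. 5714-class parts): 1000BASE-X autonegotiation
 * through the standard BMCR/BMSR/ADVERTISE registers.
 */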
4037 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4038 {
4039         int current_link_up, err = 0;
4040         u32 bmsr, bmcr;
4041         u16 current_speed;
4042         u8 current_duplex;
4043         u32 local_adv, remote_adv;
4044
4045         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4046         tw32_f(MAC_MODE, tp->mac_mode);
4047         udelay(40);
4048
4049         tw32(MAC_EVENT, 0);
4050
4051         tw32_f(MAC_STATUS,
4052              (MAC_STATUS_SYNC_CHANGED |
4053               MAC_STATUS_CFG_CHANGED |
4054               MAC_STATUS_MI_COMPLETION |
4055               MAC_STATUS_LNKSTATE_CHANGED));
4056         udelay(40);
4057
4058         if (force_reset)
4059                 tg3_phy_reset(tp);
4060
4061         current_link_up = 0;
4062         current_speed = SPEED_INVALID;
4063         current_duplex = DUPLEX_INVALID;
4064
4065         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4066         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4068                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4069                         bmsr |= BMSR_LSTATUS;
4070                 else
4071                         bmsr &= ~BMSR_LSTATUS;
4072         }
4073
4074         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4075
4076         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4077             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4078                 /* do nothing, just check for link up at the end */
4079         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4080                 u32 adv, new_adv;
4081
4082                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4083                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4084                                   ADVERTISE_1000XPAUSE |
4085                                   ADVERTISE_1000XPSE_ASYM |
4086                                   ADVERTISE_SLCT);
4087
4088                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4089
4090                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4091                         new_adv |= ADVERTISE_1000XHALF;
4092                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4093                         new_adv |= ADVERTISE_1000XFULL;
4094
4095                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4096                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4097                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4098                         tg3_writephy(tp, MII_BMCR, bmcr);
4099
4100                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4101                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4102                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4103
4104                         return err;
4105                 }
4106         } else {
4107                 u32 new_bmcr;
4108
4109                 bmcr &= ~BMCR_SPEED1000;
4110                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4111
4112                 if (tp->link_config.duplex == DUPLEX_FULL)
4113                         new_bmcr |= BMCR_FULLDPLX;
4114
4115                 if (new_bmcr != bmcr) {
4116                         /* BMCR_SPEED1000 is a reserved bit that needs
4117                          * to be set on write.
4118                          */
4119                         new_bmcr |= BMCR_SPEED1000;
4120
4121                         /* Force a linkdown */
4122                         if (netif_carrier_ok(tp->dev)) {
4123                                 u32 adv;
4124
4125                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4126                                 adv &= ~(ADVERTISE_1000XFULL |
4127                                          ADVERTISE_1000XHALF |
4128                                          ADVERTISE_SLCT);
4129                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4130                                 tg3_writephy(tp, MII_BMCR, bmcr |
4131                                                            BMCR_ANRESTART |
4132                                                            BMCR_ANENABLE);
4133                                 udelay(10);
4134                                 netif_carrier_off(tp->dev);
4135                         }
4136                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4137                         bmcr = new_bmcr;
4138                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4139                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4140                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4141                             ASIC_REV_5714) {
4142                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4143                                         bmsr |= BMSR_LSTATUS;
4144                                 else
4145                                         bmsr &= ~BMSR_LSTATUS;
4146                         }
4147                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4148                 }
4149         }
4150
4151         if (bmsr & BMSR_LSTATUS) {
4152                 current_speed = SPEED_1000;
4153                 current_link_up = 1;
4154                 if (bmcr & BMCR_FULLDPLX)
4155                         current_duplex = DUPLEX_FULL;
4156                 else
4157                         current_duplex = DUPLEX_HALF;
4158
4159                 local_adv = 0;
4160                 remote_adv = 0;
4161
4162                 if (bmcr & BMCR_ANENABLE) {
4163                         u32 common;
4164
4165                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4166                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4167                         common = local_adv & remote_adv;
4168                         if (common & (ADVERTISE_1000XHALF |
4169                                       ADVERTISE_1000XFULL)) {
4170                                 if (common & ADVERTISE_1000XFULL)
4171                                         current_duplex = DUPLEX_FULL;
4172                                 else
4173                                         current_duplex = DUPLEX_HALF;
4174                         } else
4175                                 current_link_up = 0;
4177                 }
4178         }
4179
4180         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4181                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4182
4183         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4184         if (tp->link_config.active_duplex == DUPLEX_HALF)
4185                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4186
4187         tw32_f(MAC_MODE, tp->mac_mode);
4188         udelay(40);
4189
4190         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4191
4192         tp->link_config.active_speed = current_speed;
4193         tp->link_config.active_duplex = current_duplex;
4194
4195         if (current_link_up != netif_carrier_ok(tp->dev)) {
4196                 if (current_link_up)
4197                         netif_carrier_on(tp->dev);
4198                 else {
4199                         netif_carrier_off(tp->dev);
4200                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4201                 }
4202                 tg3_link_report(tp);
4203         }
4204         return err;
4205 }
4206
4207 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4208 {
4209         if (tp->serdes_counter) {
4210                 /* Give autoneg time to complete. */
4211                 tp->serdes_counter--;
4212                 return;
4213         }
4214         if (!netif_carrier_ok(tp->dev) &&
4215             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4216                 u32 bmcr;
4217
4218                 tg3_readphy(tp, MII_BMCR, &bmcr);
4219                 if (bmcr & BMCR_ANENABLE) {
4220                         u32 phy1, phy2;
4221
4222                         /* Select shadow register 0x1f */
4223                         tg3_writephy(tp, 0x1c, 0x7c00);
4224                         tg3_readphy(tp, 0x1c, &phy1);
4225
4226                         /* Select expansion interrupt status register */
4227                         tg3_writephy(tp, 0x17, 0x0f01);
4228                         tg3_readphy(tp, 0x15, &phy2);
4229                         tg3_readphy(tp, 0x15, &phy2);
4230
4231                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4232                                 /* We have signal detect and are not
4233                                  * receiving config code words, so the link
4234                                  * is up by parallel detection.
4235                                  */
4236
4237                                 bmcr &= ~BMCR_ANENABLE;
4238                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4239                                 tg3_writephy(tp, MII_BMCR, bmcr);
4240                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4241                         }
4242                 }
4243         } else if (netif_carrier_ok(tp->dev) &&
4244                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4245                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4247                 u32 phy2;
4248
4249                 /* Select expansion interrupt status register */
4250                 tg3_writephy(tp, 0x17, 0x0f01);
4251                 tg3_readphy(tp, 0x15, &phy2);
4252                 if (phy2 & 0x20) {
4253                         u32 bmcr;
4254
4255                         /* Config code words received, turn on autoneg. */
4256                         tg3_readphy(tp, MII_BMCR, &bmcr);
4257                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4258
4259                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4261                 }
4262         }
4263 }
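
/* Editor's sketch (illustrative only, not part of the driver): the
 * decision made in tg3_serdes_parallel_detect() above boils down to a
 * single predicate over the two PHY reads, with the bit meanings taken
 * from the comments in that function.  The ex_ name is hypothetical.
 */
static inline int ex_parallel_detect_link(u32 phy1, u32 phy2)
{
        /* Signal detect present (phy1 bit 4) while no config code
         * words are arriving (phy2 bit 5): the peer is not running
         * autoneg, so the link may be declared up by parallel
         * detection.
         */
        return (phy1 & 0x10) && !(phy2 & 0x20);
}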
4264
4265 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4266 {
4267         int err;
4268
4269         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4270                 err = tg3_setup_fiber_phy(tp, force_reset);
4271         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4272                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4273         } else {
4274                 err = tg3_setup_copper_phy(tp, force_reset);
4275         }
4276
4277         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4278                 u32 val, scale;
4279
4280                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4281                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4282                         scale = 65;
4283                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4284                         scale = 6;
4285                 else
4286                         scale = 12;
4287
4288                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4289                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4290                 tw32(GRC_MISC_CFG, val);
4291         }
4292
4293         if (tp->link_config.active_speed == SPEED_1000 &&
4294             tp->link_config.active_duplex == DUPLEX_HALF)
4295                 tw32(MAC_TX_LENGTHS,
4296                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4297                       (6 << TX_LENGTHS_IPG_SHIFT) |
4298                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4299         else
4300                 tw32(MAC_TX_LENGTHS,
4301                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4302                       (6 << TX_LENGTHS_IPG_SHIFT) |
4303                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4304
4305         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4306                 if (netif_carrier_ok(tp->dev)) {
4307                         tw32(HOSTCC_STAT_COAL_TICKS,
4308                              tp->coal.stats_block_coalesce_usecs);
4309                 } else {
4310                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4311                 }
4312         }
4313
4314         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4315                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4316                 if (!netif_carrier_ok(tp->dev))
4317                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4318                               tp->pwrmgmt_thresh;
4319                 else
4320                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4321                 tw32(PCIE_PWR_MGMT_THRESH, val);
4322         }
4323
4324         return err;
4325 }
4326
4327 /* This is called whenever we suspect that the system chipset is re-
4328  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4329  * is bogus tx completions. We try to recover by setting the
4330  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4331  * in the workqueue.
4332  */
4333 static void tg3_tx_recover(struct tg3 *tp)
4334 {
4335         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4336                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4337
4338         netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
4339                     "Please report the problem to the driver maintainer and include system chipset information.\n");
4340
4341         spin_lock(&tp->lock);
4342         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4343         spin_unlock(&tp->lock);
4344 }
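
/* Editor's sketch (illustrative only, not part of the driver):
 * tg3_tx_recover() uses a common deferred-recovery pattern -- the hot
 * path merely records that a reset is needed, and the heavyweight chip
 * reset runs later from process context (see tg3_reset_task()).  A
 * minimal form of that pattern, with hypothetical ex_ names:
 */
static inline void ex_request_recovery(spinlock_t *lock, u32 *flags,
                                       struct work_struct *reset_work)
{
        spin_lock(lock);                /* serialize flag updates */
        *flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(lock);
        schedule_work(reset_work);      /* reset later, in a workqueue */
}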
4345
4346 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4347 {
4348         smp_mb();
4349         return tnapi->tx_pending -
4350                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4351 }
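
/* Editor's note (illustrative only): the computation above relies on
 * TG3_TX_RING_SIZE being a power of two.  With free-running u32
 * producer/consumer indices, the masked difference yields the ring
 * occupancy even across 32-bit wraparound, e.g. on a 512-entry ring:
 *
 *      prod = 3, cons = 0xfffffffe
 *      (prod - cons) & 511  ==  5 & 511  ==  5 descriptors in flight
 *
 * and the free count is tx_pending minus that occupancy.  The smp_mb()
 * pairs with the barrier in tg3_tx(); see the comment there.
 */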
4352
4353 /* Tigon3 never reports partial packet sends.  So we do not
4354  * need special logic to handle SKBs that have not had all
4355  * of their frags sent yet, like SunGEM does.
4356  */
4357 static void tg3_tx(struct tg3_napi *tnapi)
4358 {
4359         struct tg3 *tp = tnapi->tp;
4360         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4361         u32 sw_idx = tnapi->tx_cons;
4362         struct netdev_queue *txq;
4363         int index = tnapi - tp->napi;
4364
4365         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4366                 index--;
4367
4368         txq = netdev_get_tx_queue(tp->dev, index);
4369
4370         while (sw_idx != hw_idx) {
4371                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4372                 struct sk_buff *skb = ri->skb;
4373                 int i, tx_bug = 0;
4374
4375                 if (unlikely(skb == NULL)) {
4376                         tg3_tx_recover(tp);
4377                         return;
4378                 }
4379
4380                 pci_unmap_single(tp->pdev,
4381                                  pci_unmap_addr(ri, mapping),
4382                                  skb_headlen(skb),
4383                                  PCI_DMA_TODEVICE);
4384
4385                 ri->skb = NULL;
4386
4387                 sw_idx = NEXT_TX(sw_idx);
4388
4389                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4390                         ri = &tnapi->tx_buffers[sw_idx];
4391                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4392                                 tx_bug = 1;
4393
4394                         pci_unmap_page(tp->pdev,
4395                                        pci_unmap_addr(ri, mapping),
4396                                        skb_shinfo(skb)->frags[i].size,
4397                                        PCI_DMA_TODEVICE);
4398                         sw_idx = NEXT_TX(sw_idx);
4399                 }
4400
4401                 dev_kfree_skb(skb);
4402
4403                 if (unlikely(tx_bug)) {
4404                         tg3_tx_recover(tp);
4405                         return;
4406                 }
4407         }
4408
4409         tnapi->tx_cons = sw_idx;
4410
4411         /* Need to make the tx_cons update visible to tg3_start_xmit()
4412          * before checking for netif_queue_stopped().  Without the
4413          * memory barrier, there is a small possibility that tg3_start_xmit()
4414          * will miss it and cause the queue to be stopped forever.
4415          */
4416         smp_mb();
4417
4418         if (unlikely(netif_tx_queue_stopped(txq) &&
4419                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4420                 __netif_tx_lock(txq, smp_processor_id());
4421                 if (netif_tx_queue_stopped(txq) &&
4422                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4423                         netif_tx_wake_queue(txq);
4424                 __netif_tx_unlock(txq);
4425         }
4426 }
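
/* Editor's note (illustrative only): the smp_mb() above and the one in
 * tg3_tx_avail() close the classic lost-wakeup race between the
 * completion and transmit paths.  Schematically:
 *
 *      completion path (here)          transmit path
 *      ----------------------          -------------
 *      tnapi->tx_cons = sw_idx;        netif_tx_stop_queue(txq);
 *      smp_mb();                       smp_mb(), via tg3_tx_avail();
 *      stopped && avail > thresh?      avail > thresh?
 *              -> wake queue                   -> wake/keep running
 *
 * Each side writes its own state before reading the other side's, so
 * at least one of them sees up-to-date values and the queue can never
 * be left stopped while descriptors are free.
 */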
4427
4428 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4429 {
4430         if (!ri->skb)
4431                 return;
4432
4433         pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4434                          map_sz, PCI_DMA_FROMDEVICE);
4435         dev_kfree_skb_any(ri->skb);
4436         ri->skb = NULL;
4437 }
4438
4439 /* Returns size of skb allocated or < 0 on error.
4440  *
4441  * We only need to fill in the address because the other members
4442  * of the RX descriptor are invariant, see tg3_init_rings.
4443  *
4444  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4445  * posting buffers we only dirty the first cache line of the RX
4446  * descriptor (containing the address).  Whereas for the RX status
4447  * buffers the cpu only reads the last cacheline of the RX descriptor
4448  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4449  */
4450 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4451                             u32 opaque_key, u32 dest_idx_unmasked)
4452 {
4453         struct tg3_rx_buffer_desc *desc;
4454         struct ring_info *map, *src_map;
4455         struct sk_buff *skb;
4456         dma_addr_t mapping;
4457         int skb_size, dest_idx;
4458
4459         src_map = NULL;
4460         switch (opaque_key) {
4461         case RXD_OPAQUE_RING_STD:
4462                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4463                 desc = &tpr->rx_std[dest_idx];
4464                 map = &tpr->rx_std_buffers[dest_idx];
4465                 skb_size = tp->rx_pkt_map_sz;
4466                 break;
4467
4468         case RXD_OPAQUE_RING_JUMBO:
4469                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4470                 desc = &tpr->rx_jmb[dest_idx].std;
4471                 map = &tpr->rx_jmb_buffers[dest_idx];
4472                 skb_size = TG3_RX_JMB_MAP_SZ;
4473                 break;
4474
4475         default:
4476                 return -EINVAL;
4477         }
4478
4479         /* Do not overwrite any of the map or rp information
4480          * until we are sure we can commit to a new buffer.
4481          *
4482          * Callers depend upon this behavior and assume that
4483          * we leave everything unchanged if we fail.
4484          */
4485         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4486         if (skb == NULL)
4487                 return -ENOMEM;
4488
4489         skb_reserve(skb, tp->rx_offset);
4490
4491         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4492                                  PCI_DMA_FROMDEVICE);
4493         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4494                 dev_kfree_skb(skb);
4495                 return -EIO;
4496         }
4497
4498         map->skb = skb;
4499         pci_unmap_addr_set(map, mapping, mapping);
4500
4501         desc->addr_hi = ((u64)mapping >> 32);
4502         desc->addr_lo = ((u64)mapping & 0xffffffff);
4503
4504         return skb_size;
4505 }
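
/* Editor's sketch (illustrative only, not part of the driver): the
 * descriptor address is published as two 32-bit halves; going through
 * a u64 keeps the shift well-defined even when dma_addr_t is 32 bits
 * wide.  The ex_ name is hypothetical.
 */
static inline void ex_set_desc_addr(struct tg3_rx_buffer_desc *desc,
                                    dma_addr_t mapping)
{
        desc->addr_hi = (u32)((u64)mapping >> 32);        /* upper half */
        desc->addr_lo = (u32)((u64)mapping & 0xffffffff); /* lower half */
}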
4506
4507 /* We only need to move over in the address because the other
4508  * members of the RX descriptor are invariant.  See notes above
4509  * tg3_alloc_rx_skb for full details.
4510  */
4511 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4512                            struct tg3_rx_prodring_set *dpr,
4513                            u32 opaque_key, int src_idx,
4514                            u32 dest_idx_unmasked)
4515 {
4516         struct tg3 *tp = tnapi->tp;
4517         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4518         struct ring_info *src_map, *dest_map;
4519         int dest_idx;
4520         struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4521
4522         switch (opaque_key) {
4523         case RXD_OPAQUE_RING_STD:
4524                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4525                 dest_desc = &dpr->rx_std[dest_idx];
4526                 dest_map = &dpr->rx_std_buffers[dest_idx];
4527                 src_desc = &spr->rx_std[src_idx];
4528                 src_map = &spr->rx_std_buffers[src_idx];
4529                 break;
4530
4531         case RXD_OPAQUE_RING_JUMBO:
4532                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4533                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4534                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4535                 src_desc = &spr->rx_jmb[src_idx].std;
4536                 src_map = &spr->rx_jmb_buffers[src_idx];
4537                 break;
4538
4539         default:
4540                 return;
4541         }
4542
4543         dest_map->skb = src_map->skb;
4544         pci_unmap_addr_set(dest_map, mapping,
4545                            pci_unmap_addr(src_map, mapping));
4546         dest_desc->addr_hi = src_desc->addr_hi;
4547         dest_desc->addr_lo = src_desc->addr_lo;
4548
4549         /* Ensure that the update to the skb happens after the physical
4550          * addresses have been transferred to the new BD location.
4551          */
4552         smp_wmb();
4553
4554         src_map->skb = NULL;
4555 }
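
/* Editor's note (illustrative only): the smp_wmb() above orders "copy
 * the skb and its DMA mapping to the new slot" before "mark the old
 * slot free" (src_map->skb = NULL).  Any reader that observes the old
 * slot as free after a matching smp_rmb() -- see
 * tg3_rx_prodring_xfer() -- is thus guaranteed that the transfer has
 * already completed.
 */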
4556
4557 /* The RX ring scheme is composed of multiple rings which post fresh
4558  * buffers to the chip, and one special ring the chip uses to report
4559  * status back to the host.
4560  *
4561  * The special ring reports the status of received packets to the
4562  * host.  The chip does not write into the original descriptor the
4563  * RX buffer was obtained from.  The chip simply takes the original
4564  * descriptor as provided by the host, updates the status and length
4565  * field, then writes this into the next status ring entry.
4566  *
4567  * Each ring the host uses to post buffers to the chip is described
4568  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4569  * it is first placed into the on-chip ram.  When the packet's length
4570  * is known, it walks down the TG3_BDINFO entries to select the ring.
4571  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4572  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4573  * whose MAXLEN covers the new packet's length is chosen.
4574  * The "separate ring for rx status" scheme may sound queer, but it makes
4575  * sense from a cache coherency perspective.  If only the host writes
4576  * to the buffer post rings, and only the chip writes to the rx status
4577  * rings, then cache lines never move beyond shared-modified state.
4578  * If both the host and chip were to write into the same ring, cache line
4579  * eviction could occur since both entities want it in an exclusive state.
4580  */
4581 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4582 {
4583         struct tg3 *tp = tnapi->tp;
4584         u32 work_mask, rx_std_posted = 0;
4585         u32 std_prod_idx, jmb_prod_idx;
4586         u32 sw_idx = tnapi->rx_rcb_ptr;
4587         u16 hw_idx;
4588         int received;
4589         struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4590
4591         hw_idx = *(tnapi->rx_rcb_prod_idx);
4592         /*
4593          * We need to order the read of hw_idx and the read of
4594          * the opaque cookie.
4595          */
4596         rmb();
4597         work_mask = 0;
4598         received = 0;
4599         std_prod_idx = tpr->rx_std_prod_idx;
4600         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4601         while (sw_idx != hw_idx && budget > 0) {
4602                 struct ring_info *ri;
4603                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4604                 unsigned int len;
4605                 struct sk_buff *skb;
4606                 dma_addr_t dma_addr;
4607                 u32 opaque_key, desc_idx, *post_ptr;
4608
4609                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4610                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4611                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4612                         ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4613                         dma_addr = pci_unmap_addr(ri, mapping);
4614                         skb = ri->skb;
4615                         post_ptr = &std_prod_idx;
4616                         rx_std_posted++;
4617                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4618                         ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4619                         dma_addr = pci_unmap_addr(ri, mapping);
4620                         skb = ri->skb;
4621                         post_ptr = &jmb_prod_idx;
4622                 } else
4623                         goto next_pkt_nopost;
4624
4625                 work_mask |= opaque_key;
4626
4627                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4628                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4629                 drop_it:
4630                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4631                                        desc_idx, *post_ptr);
4632                 drop_it_no_recycle:
4633                         /* The card keeps track of the other statistics. */
4634                         tp->net_stats.rx_dropped++;
4635                         goto next_pkt;
4636                 }
4637
4638                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4639                       ETH_FCS_LEN;
4640
4641                 if (len > RX_COPY_THRESHOLD &&
4642                     tp->rx_offset == NET_IP_ALIGN) {
4643                         /* rx_offset will likely not equal NET_IP_ALIGN
4644                          * if this is a 5701 card running in PCI-X mode
4645                          * [see tg3_get_invariants()]
4646                          */
4647                         int skb_size;
4648
4649                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4650                                                     *post_ptr);
4651                         if (skb_size < 0)
4652                                 goto drop_it;
4653
4654                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4655                                          PCI_DMA_FROMDEVICE);
4656
4657                         /* Ensure that the update to the skb happens
4658                          * after the usage of the old DMA mapping.
4659                          */
4660                         smp_wmb();
4661
4662                         ri->skb = NULL;
4663
4664                         skb_put(skb, len);
4665                 } else {
4666                         struct sk_buff *copy_skb;
4667
4668                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4669                                        desc_idx, *post_ptr);
4670
4671                         copy_skb = netdev_alloc_skb(tp->dev,
4672                                                     len + TG3_RAW_IP_ALIGN);
4673                         if (copy_skb == NULL)
4674                                 goto drop_it_no_recycle;
4675
4676                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4677                         skb_put(copy_skb, len);
4678                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4679                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4680                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4681
4682                         /* We'll reuse the original ring buffer. */
4683                         skb = copy_skb;
4684                 }
4685
4686                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4687                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4688                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4689                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4690                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4691                 else
4692                         skb->ip_summed = CHECKSUM_NONE;
4693
4694                 skb->protocol = eth_type_trans(skb, tp->dev);
4695
4696                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4697                     skb->protocol != htons(ETH_P_8021Q)) {
4698                         dev_kfree_skb(skb);
4699                         goto next_pkt;
4700                 }
4701
4702 #if TG3_VLAN_TAG_USED
4703                 if (tp->vlgrp != NULL &&
4704                     desc->type_flags & RXD_FLAG_VLAN) {
4705                         vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4706                                          desc->err_vlan & RXD_VLAN_MASK, skb);
4707                 } else
4708 #endif
4709                         napi_gro_receive(&tnapi->napi, skb);
4710
4711                 received++;
4712                 budget--;
4713
4714 next_pkt:
4715                 (*post_ptr)++;
4716
4717                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4718                         tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4719                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4720                                      tpr->rx_std_prod_idx);
4721                         work_mask &= ~RXD_OPAQUE_RING_STD;
4722                         rx_std_posted = 0;
4723                 }
4724 next_pkt_nopost:
4725                 sw_idx++;
4726                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4727
4728                 /* Refresh hw_idx to see if there is new work */
4729                 if (sw_idx == hw_idx) {
4730                         hw_idx = *(tnapi->rx_rcb_prod_idx);
4731                         rmb();
4732                 }
4733         }
4734
4735         /* ACK the status ring. */
4736         tnapi->rx_rcb_ptr = sw_idx;
4737         tw32_rx_mbox(tnapi->consmbox, sw_idx);
4738
4739         /* Refill RX ring(s). */
4740         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4741                 if (work_mask & RXD_OPAQUE_RING_STD) {
4742                         tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4743                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4744                                      tpr->rx_std_prod_idx);
4745                 }
4746                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4747                         tpr->rx_jmb_prod_idx = jmb_prod_idx %
4748                                                TG3_RX_JUMBO_RING_SIZE;
4749                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4750                                      tpr->rx_jmb_prod_idx);
4751                 }
4752                 mmiowb();
4753         } else if (work_mask) {
4754                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4755                  * updated before the producer indices can be updated.
4756                  */
4757                 smp_wmb();
4758
4759                 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4760                 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4761
4762                 if (tnapi != &tp->napi[1])
4763                         napi_schedule(&tp->napi[1].napi);
4764         }
4765
4766         return received;
4767 }
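
/* Editor's sketch (illustrative only, not part of the driver):
 * stripped of buffer management, tg3_rx() above is the standard
 * consumer loop over a DMA status ring -- sample the hardware producer
 * index, order that read before reading the descriptors it covers,
 * consume up to 'budget' entries, then acknowledge.  Hypothetical ex_
 * names; the real ack goes through the consumer mailbox.
 */
static inline int ex_status_ring_poll(struct tg3_napi *tnapi, int budget,
                                      void (*consume)(struct tg3_napi *, u32))
{
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx = *(tnapi->rx_rcb_prod_idx);
        int received = 0;

        rmb();          /* order hw_idx read before descriptor reads */
        while (sw_idx != hw_idx && budget > 0) {
                consume(tnapi, sw_idx);         /* handle one entry */
                sw_idx = (sw_idx + 1) &
                         (TG3_RX_RCB_RING_SIZE(tnapi->tp) - 1);
                received++;
                budget--;
        }
        tnapi->rx_rcb_ptr = sw_idx;     /* caller acks via mailbox */
        return received;
}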
4768
4769 static void tg3_poll_link(struct tg3 *tp)
4770 {
4771         /* handle link change and other phy events */
4772         if (!(tp->tg3_flags &
4773               (TG3_FLAG_USE_LINKCHG_REG |
4774                TG3_FLAG_POLL_SERDES))) {
4775                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4776
4777                 if (sblk->status & SD_STATUS_LINK_CHG) {
4778                         sblk->status = SD_STATUS_UPDATED |
4779                                        (sblk->status & ~SD_STATUS_LINK_CHG);
4780                         spin_lock(&tp->lock);
4781                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4782                                 tw32_f(MAC_STATUS,
4783                                      (MAC_STATUS_SYNC_CHANGED |
4784                                       MAC_STATUS_CFG_CHANGED |
4785                                       MAC_STATUS_MI_COMPLETION |
4786                                       MAC_STATUS_LNKSTATE_CHANGED));
4787                                 udelay(40);
4788                         } else
4789                                 tg3_setup_phy(tp, 0);
4790                         spin_unlock(&tp->lock);
4791                 }
4792         }
4793 }
4794
4795 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4796                                 struct tg3_rx_prodring_set *dpr,
4797                                 struct tg3_rx_prodring_set *spr)
4798 {
4799         u32 si, di, cpycnt, src_prod_idx;
4800         int i, err = 0;
4801
4802         while (1) {
4803                 src_prod_idx = spr->rx_std_prod_idx;
4804
4805                 /* Make sure updates to the rx_std_buffers[] entries and the
4806                  * standard producer index are seen in the correct order.
4807                  */
4808                 smp_rmb();
4809
4810                 if (spr->rx_std_cons_idx == src_prod_idx)
4811                         break;
4812
4813                 if (spr->rx_std_cons_idx < src_prod_idx)
4814                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4815                 else
4816                         cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4817
4818                 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4819
4820                 si = spr->rx_std_cons_idx;
4821                 di = dpr->rx_std_prod_idx;
4822
4823                 for (i = di; i < di + cpycnt; i++) {
4824                         if (dpr->rx_std_buffers[i].skb) {
4825                                 cpycnt = i - di;
4826                                 err = -ENOSPC;
4827                                 break;
4828                         }
4829                 }
4830
4831                 if (!cpycnt)
4832                         break;
4833
4834                 /* Ensure that updates to the rx_std_buffers ring and the
4835                  * shadowed hardware producer ring from tg3_recycle_skb() are
4836                  * ordered correctly WRT the skb check above.
4837                  */
4838                 smp_rmb();
4839
4840                 memcpy(&dpr->rx_std_buffers[di],
4841                        &spr->rx_std_buffers[si],
4842                        cpycnt * sizeof(struct ring_info));
4843
4844                 for (i = 0; i < cpycnt; i++, di++, si++) {
4845                         struct tg3_rx_buffer_desc *sbd, *dbd;
4846                         sbd = &spr->rx_std[si];
4847                         dbd = &dpr->rx_std[di];
4848                         dbd->addr_hi = sbd->addr_hi;
4849                         dbd->addr_lo = sbd->addr_lo;
4850                 }
4851
4852                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4853                                        TG3_RX_RING_SIZE;
4854                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4855                                        TG3_RX_RING_SIZE;
4856         }
4857
4858         while (1) {
4859                 src_prod_idx = spr->rx_jmb_prod_idx;
4860
4861                 /* Make sure updates to the rx_jmb_buffers[] entries and
4862                  * the jumbo producer index are seen in the correct order.
4863                  */
4864                 smp_rmb();
4865
4866                 if (spr->rx_jmb_cons_idx == src_prod_idx)
4867                         break;
4868
4869                 if (spr->rx_jmb_cons_idx < src_prod_idx)
4870                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4871                 else
4872                         cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4873
4874                 cpycnt = min(cpycnt,
4875                              TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4876
4877                 si = spr->rx_jmb_cons_idx;
4878                 di = dpr->rx_jmb_prod_idx;
4879
4880                 for (i = di; i < di + cpycnt; i++) {
4881                         if (dpr->rx_jmb_buffers[i].skb) {
4882                                 cpycnt = i - di;
4883                                 err = -ENOSPC;
4884                                 break;
4885                         }
4886                 }
4887
4888                 if (!cpycnt)
4889                         break;
4890
4891                 /* Ensure that updates to the rx_jmb_buffers ring and the
4892                  * shadowed hardware producer ring from tg3_recycle_skb() are
4893                  * ordered correctly WRT the skb check above.
4894                  */
4895                 smp_rmb();
4896
4897                 memcpy(&dpr->rx_jmb_buffers[di],
4898                        &spr->rx_jmb_buffers[si],
4899                        cpycnt * sizeof(struct ring_info));
4900
4901                 for (i = 0; i < cpycnt; i++, di++, si++) {
4902                         struct tg3_rx_buffer_desc *sbd, *dbd;
4903                         sbd = &spr->rx_jmb[si].std;
4904                         dbd = &dpr->rx_jmb[di].std;
4905                         dbd->addr_hi = sbd->addr_hi;
4906                         dbd->addr_lo = sbd->addr_lo;
4907                 }
4908
4909                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4910                                        TG3_RX_JUMBO_RING_SIZE;
4911                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4912                                        TG3_RX_JUMBO_RING_SIZE;
4913         }
4914
4915         return err;
4916 }
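
/* Editor's note (illustrative only): each pass of the two loops above
 * copies one contiguous run of entries, with cpycnt clamped three ways:
 *
 *   1. entries available: prod - cons, or the distance to the end of
 *      the ring when the source region wraps around;
 *   2. room left before the destination index itself wraps;
 *   3. the first already-occupied destination slot, if any (in which
 *      case -ENOSPC is reported once the loops finish).
 *
 * A single memcpy() per pass is therefore always safe, and wrapped
 * regions are simply picked up by the next while (1) iteration.
 */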
4917
4918 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4919 {
4920         struct tg3 *tp = tnapi->tp;
4921
4922         /* run TX completion thread */
4923         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4924                 tg3_tx(tnapi);
4925                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4926                         return work_done;
4927         }
4928
4929         /* run RX thread, within the bounds set by NAPI.
4930          * All RX "locking" is done by ensuring outside
4931          * code synchronizes with tg3->napi.poll()
4932          */
4933         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4934                 work_done += tg3_rx(tnapi, budget - work_done);
4935
4936         if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4937                 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4938                 int i, err = 0;
4939                 u32 std_prod_idx = dpr->rx_std_prod_idx;
4940                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4941
4942                 for (i = 1; i < tp->irq_cnt; i++)
4943                         err |= tg3_rx_prodring_xfer(tp, dpr,
4944                                                     tp->napi[i].prodring);
4945
4946                 wmb();
4947
4948                 if (std_prod_idx != dpr->rx_std_prod_idx)
4949                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4950                                      dpr->rx_std_prod_idx);
4951
4952                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4953                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4954                                      dpr->rx_jmb_prod_idx);
4955
4956                 mmiowb();
4957
4958                 if (err)
4959                         tw32_f(HOSTCC_MODE, tp->coal_now);
4960         }
4961
4962         return work_done;
4963 }
4964
4965 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4966 {
4967         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4968         struct tg3 *tp = tnapi->tp;
4969         int work_done = 0;
4970         struct tg3_hw_status *sblk = tnapi->hw_status;
4971
4972         while (1) {
4973                 work_done = tg3_poll_work(tnapi, work_done, budget);
4974
4975                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4976                         goto tx_recovery;
4977
4978                 if (unlikely(work_done >= budget))
4979                         break;
4980
4981                 /* tnapi->last_tag is used when re-enabling interrupts
4982                  * below to tell the hw how much work has been processed,
4983                  * so we must read it before checking for more work.
4984                  */
4985                 tnapi->last_tag = sblk->status_tag;
4986                 tnapi->last_irq_tag = tnapi->last_tag;
4987                 rmb();
4988
4989                 /* check for RX/TX work to do */
4990                 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4991                     *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4992                         napi_complete(napi);
4993                         /* Reenable interrupts. */
4994                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4995                         mmiowb();
4996                         break;
4997                 }
4998         }
4999
5000         return work_done;
5001
5002 tx_recovery:
5003         /* work_done is guaranteed to be less than budget. */
5004         napi_complete(napi);
5005         schedule_work(&tp->reset_task);
5006         return work_done;
5007 }
5008
5009 static int tg3_poll(struct napi_struct *napi, int budget)
5010 {
5011         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5012         struct tg3 *tp = tnapi->tp;
5013         int work_done = 0;
5014         struct tg3_hw_status *sblk = tnapi->hw_status;
5015
5016         while (1) {
5017                 tg3_poll_link(tp);
5018
5019                 work_done = tg3_poll_work(tnapi, work_done, budget);
5020
5021                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5022                         goto tx_recovery;
5023
5024                 if (unlikely(work_done >= budget))
5025                         break;
5026
5027                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5028                         /* tnapi->last_tag is used in tg3_int_reenable() below
5029                          * to tell the hw how much work has been processed,
5030                          * so we must read it before checking for more work.
5031                          */
5032                         tnapi->last_tag = sblk->status_tag;
5033                         tnapi->last_irq_tag = tnapi->last_tag;
5034                         rmb();
5035                 } else
5036                         sblk->status &= ~SD_STATUS_UPDATED;
5037
5038                 if (likely(!tg3_has_work(tnapi))) {
5039                         napi_complete(napi);
5040                         tg3_int_reenable(tnapi);
5041                         break;
5042                 }
5043         }
5044
5045         return work_done;
5046
5047 tx_recovery:
5048         /* work_done is guaranteed to be less than budget. */
5049         napi_complete(napi);
5050         schedule_work(&tp->reset_task);
5051         return work_done;
5052 }
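
/* Editor's sketch (illustrative only, not part of the driver): both
 * poll routines above follow the canonical NAPI contract -- do at most
 * 'budget' units of work; if the budget was exhausted, return without
 * re-enabling interrupts so the core keeps polling; otherwise complete
 * NAPI and re-arm the device interrupt.  With a hypothetical ex_ name:
 */
static inline int ex_napi_poll_shape(struct napi_struct *napi, int budget,
                                     int work_done, bool more_work)
{
        if (work_done >= budget)
                return work_done;       /* stay in polling mode */
        if (!more_work) {
                napi_complete(napi);    /* leave polling mode, then
                                         * re-enable the device IRQ here
                                         */
        }
        return work_done;
}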
5053
5054 static void tg3_irq_quiesce(struct tg3 *tp)
5055 {
5056         int i;
5057
5058         BUG_ON(tp->irq_sync);
5059
5060         tp->irq_sync = 1;
5061         smp_mb();
5062
5063         for (i = 0; i < tp->irq_cnt; i++)
5064                 synchronize_irq(tp->napi[i].irq_vec);
5065 }
5066
5067 static inline int tg3_irq_sync(struct tg3 *tp)
5068 {
5069         return tp->irq_sync;
5070 }
5071
5072 /* Fully shut down all tg3 driver activity elsewhere in the system.
5073  * If irq_sync is non-zero, the IRQ handlers are synchronized with as
5074  * well.  This is usually only necessary when shutting down the
5075  * device.
5076  */
5077 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5078 {
5079         spin_lock_bh(&tp->lock);
5080         if (irq_sync)
5081                 tg3_irq_quiesce(tp);
5082 }
5083
5084 static inline void tg3_full_unlock(struct tg3 *tp)
5085 {
5086         spin_unlock_bh(&tp->lock);
5087 }
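
/* Editor's note (illustrative only): a typical caller quiesces the IRQ
 * handlers only around a full reconfiguration, e.g.
 *
 *      tg3_full_lock(tp, 1);   -- take tp->lock, wait out handlers
 *      ... halt or reprogram the hardware ...
 *      tp->irq_sync = 0;       -- let handlers schedule NAPI again
 *      tg3_full_unlock(tp);
 *
 * The tg3_irq_sync() checks in the interrupt handlers then ensure no
 * new NAPI work is scheduled while the device is being reconfigured.
 */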
5088
5089 /* One-shot MSI handler - the chip automatically disables the
5090  * interrupt after sending the MSI, so the driver doesn't have to.
5091  */
5092 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5093 {
5094         struct tg3_napi *tnapi = dev_id;
5095         struct tg3 *tp = tnapi->tp;
5096
5097         prefetch(tnapi->hw_status);
5098         if (tnapi->rx_rcb)
5099                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5100
5101         if (likely(!tg3_irq_sync(tp)))
5102                 napi_schedule(&tnapi->napi);
5103
5104         return IRQ_HANDLED;
5105 }
5106
5107 /* MSI ISR - No need to check for interrupt sharing and no need to
5108  * flush status block and interrupt mailbox. PCI ordering rules
5109  * guarantee that MSI will arrive after the status block.
5110  */
5111 static irqreturn_t tg3_msi(int irq, void *dev_id)
5112 {
5113         struct tg3_napi *tnapi = dev_id;
5114         struct tg3 *tp = tnapi->tp;
5115
5116         prefetch(tnapi->hw_status);
5117         if (tnapi->rx_rcb)
5118                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5119         /*
5120          * Writing any value to intr-mbox-0 clears PCI INTA# and
5121          * chip-internal interrupt pending events.
5122          * Writing non-zero to intr-mbox-0 additionally tells the
5123          * NIC to stop sending us irqs, engaging "in-intr-handler"
5124          * event coalescing.
5125          */
5126         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5127         if (likely(!tg3_irq_sync(tp)))
5128                 napi_schedule(&tnapi->napi);
5129
5130         return IRQ_RETVAL(1);
5131 }
5132
5133 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5134 {
5135         struct tg3_napi *tnapi = dev_id;
5136         struct tg3 *tp = tnapi->tp;
5137         struct tg3_hw_status *sblk = tnapi->hw_status;
5138         unsigned int handled = 1;
5139
5140         /* In INTx mode, it is possible for the interrupt to arrive at
5141          * the CPU before the status block write that preceded it becomes
5142          * visible.  Reading the PCI State register will confirm whether
5143          * the interrupt is ours and will flush the status block.
5144          */
5145         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5146                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5147                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5148                         handled = 0;
5149                         goto out;
5150                 }
5151         }
5152
5153         /*
5154          * Writing any value to intr-mbox-0 clears PCI INTA# and
5155          * chip-internal interrupt pending events.
5156          * Writing non-zero to intr-mbox-0 additionally tells the
5157          * NIC to stop sending us irqs, engaging "in-intr-handler"
5158          * event coalescing.
5159          *
5160          * Flush the mailbox to de-assert the IRQ immediately to prevent
5161          * spurious interrupts.  The flush impacts performance but
5162          * excessive spurious interrupts can be worse in some cases.
5163          */
5164         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5165         if (tg3_irq_sync(tp))
5166                 goto out;
5167         sblk->status &= ~SD_STATUS_UPDATED;
5168         if (likely(tg3_has_work(tnapi))) {
5169                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5170                 napi_schedule(&tnapi->napi);
5171         } else {
5172                 /* No work, shared interrupt perhaps?  Re-enable
5173                  * interrupts, and flush that PCI write.
5174                  */
5175                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5176                                0x00000000);
5177         }
5178 out:
5179         return IRQ_RETVAL(handled);
5180 }
5181
5182 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5183 {
5184         struct tg3_napi *tnapi = dev_id;
5185         struct tg3 *tp = tnapi->tp;
5186         struct tg3_hw_status *sblk = tnapi->hw_status;
5187         unsigned int handled = 1;
5188
5189         /* In INTx mode, it is possible for the interrupt to arrive at
5190          * the CPU before the status block write that preceded it becomes
5191          * visible.  Reading the PCI State register will confirm whether
5192          * the interrupt is ours and will flush the status block.
5193          */
5194         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5195                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5196                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5197                         handled = 0;
5198                         goto out;
5199                 }
5200         }
5201
5202         /*
5203          * Writing any value to intr-mbox-0 clears PCI INTA# and
5204          * chip-internal interrupt pending events.
5205          * Writing non-zero to intr-mbox-0 additionally tells the
5206          * NIC to stop sending us irqs, engaging "in-intr-handler"
5207          * event coalescing.
5208          *
5209          * Flush the mailbox to de-assert the IRQ immediately to prevent
5210          * spurious interrupts.  The flush impacts performance but
5211          * excessive spurious interrupts can be worse in some cases.
5212          */
5213         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5214
5215         /*
5216          * In a shared interrupt configuration, sometimes other devices'
5217          * interrupts will scream.  We record the current status tag here
5218          * so that the above check can report that the screaming interrupts
5219          * are unhandled.  Eventually they will be silenced.
5220          */
5221         tnapi->last_irq_tag = sblk->status_tag;
5222
5223         if (tg3_irq_sync(tp))
5224                 goto out;
5225
5226         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5227
5228         napi_schedule(&tnapi->napi);
5229
5230 out:
5231         return IRQ_RETVAL(handled);
5232 }
5233
5234 /* ISR for interrupt test */
5235 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5236 {
5237         struct tg3_napi *tnapi = dev_id;
5238         struct tg3 *tp = tnapi->tp;
5239         struct tg3_hw_status *sblk = tnapi->hw_status;
5240
5241         if ((sblk->status & SD_STATUS_UPDATED) ||
5242             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5243                 tg3_disable_ints(tp);
5244                 return IRQ_RETVAL(1);
5245         }
5246         return IRQ_RETVAL(0);
5247 }
5248
5249 static int tg3_init_hw(struct tg3 *, int);
5250 static int tg3_halt(struct tg3 *, int, int);
5251
5252 /* Restart hardware after configuration changes, self-test, etc.
5253  * Invoked with tp->lock held.
5254  */
5255 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5256         __releases(tp->lock)
5257         __acquires(tp->lock)
5258 {
5259         int err;
5260
5261         err = tg3_init_hw(tp, reset_phy);
5262         if (err) {
5263                 netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
5264                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5265                 tg3_full_unlock(tp);
5266                 del_timer_sync(&tp->timer);
5267                 tp->irq_sync = 0;
5268                 tg3_napi_enable(tp);
5269                 dev_close(tp->dev);
5270                 tg3_full_lock(tp, 0);
5271         }
5272         return err;
5273 }
5274
5275 #ifdef CONFIG_NET_POLL_CONTROLLER
5276 static void tg3_poll_controller(struct net_device *dev)
5277 {
5278         int i;
5279         struct tg3 *tp = netdev_priv(dev);
5280
5281         for (i = 0; i < tp->irq_cnt; i++)
5282                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5283 }
5284 #endif
5285
5286 static void tg3_reset_task(struct work_struct *work)
5287 {
5288         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5289         int err;
5290         unsigned int restart_timer;
5291
5292         tg3_full_lock(tp, 0);
5293
5294         if (!netif_running(tp->dev)) {
5295                 tg3_full_unlock(tp);
5296                 return;
5297         }
5298
5299         tg3_full_unlock(tp);
5300
5301         tg3_phy_stop(tp);
5302
5303         tg3_netif_stop(tp);
5304
5305         tg3_full_lock(tp, 1);
5306
5307         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5308         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5309
5310         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5311                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5312                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5313                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5314                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5315         }
5316
5317         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5318         err = tg3_init_hw(tp, 1);
5319         if (err)
5320                 goto out;
5321
5322         tg3_netif_start(tp);
5323
5324         if (restart_timer)
5325                 mod_timer(&tp->timer, jiffies + 1);
5326
5327 out:
5328         tg3_full_unlock(tp);
5329
5330         if (!err)
5331                 tg3_phy_start(tp);
5332 }
5333
5334 static void tg3_dump_short_state(struct tg3 *tp)
5335 {
5336         netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5337                    tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5338         netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5339                    tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5340 }
5341
5342 static void tg3_tx_timeout(struct net_device *dev)
5343 {
5344         struct tg3 *tp = netdev_priv(dev);
5345
5346         if (netif_msg_tx_err(tp)) {
5347                 netdev_err(dev, "transmit timed out, resetting\n");
5348                 tg3_dump_short_state(tp);
5349         }
5350
5351         schedule_work(&tp->reset_task);
5352 }
5353
5354 /* Test for DMA buffers crossing a 4GB boundary: 4G, 8G, etc. */
5355 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5356 {
5357         u32 base = (u32) mapping & 0xffffffff;
5358
5359         return ((base > 0xffffdcc0) &&
5360                 (base + len + 8 < base));
5361 }
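
/* Editor's note (illustrative only): the test above detects a 4 GB
 * boundary crossing as u32 wraparound of base + len plus 8 bytes of
 * slack.  Worked example with a 64-byte buffer:
 *
 *      base = 0xffffffe0:  0xffffffe0 + 64 + 8 -> 0x28 < base : crosses
 *      base = 0xffff0000:  0xffff0000 + 64 + 8 -> no wrap     : safe
 *
 * The base > 0xffffdcc0 pre-check just short-circuits the common case;
 * any smaller base leaves at least 0x2340 (9024) bytes before the
 * boundary, enough headroom for a 9000-byte jumbo frame.
 */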
5362
5363 /* Test for DMA addresses > 40-bit */
5364 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5365                                           int len)
5366 {
5367 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5368         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5369                 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5370         return 0;
5371 #else
5372         return 0;
5373 #endif
5374 }
5375
5376 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5377
5378 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5379 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5380                                        struct sk_buff *skb, u32 last_plus_one,
5381                                        u32 *start, u32 base_flags, u32 mss)
5382 {
5383         struct tg3 *tp = tnapi->tp;
5384         struct sk_buff *new_skb;
5385         dma_addr_t new_addr = 0;
5386         u32 entry = *start;
5387         int i, ret = 0;
5388
5389         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5390                 new_skb = skb_copy(skb, GFP_ATOMIC);
5391         else {
5392                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5393
5394                 new_skb = skb_copy_expand(skb,
5395                                           skb_headroom(skb) + more_headroom,
5396                                           skb_tailroom(skb), GFP_ATOMIC);
5397         }
5398
5399         if (!new_skb) {
5400                 ret = -1;
5401         } else {
5402                 /* New SKB is guaranteed to be linear. */
5403                 entry = *start;
5404                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5405                                           PCI_DMA_TODEVICE);
5406                 /* Make sure the mapping succeeded */
5407                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5408                         ret = -1;
5409                         dev_kfree_skb(new_skb);
5410                         new_skb = NULL;
5411
5412                 /* Make sure new skb does not cross any 4G boundaries.
5413                  * Drop the packet if it does.
5414                  */
5415                 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5416                             tg3_4g_overflow_test(new_addr, new_skb->len)) {
5417                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5418                                          PCI_DMA_TODEVICE);
5419                         ret = -1;
5420                         dev_kfree_skb(new_skb);
5421                         new_skb = NULL;
5422                 } else {
5423                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5424                                     base_flags, 1 | (mss << 1));
5425                         *start = NEXT_TX(entry);
5426                 }
5427         }
5428
5429         /* Now clean up the sw ring entries. */
5430         i = 0;
5431         while (entry != last_plus_one) {
5432                 int len;
5433
5434                 if (i == 0)
5435                         len = skb_headlen(skb);
5436                 else
5437                         len = skb_shinfo(skb)->frags[i-1].size;
5438
5439                 pci_unmap_single(tp->pdev,
5440                                  pci_unmap_addr(&tnapi->tx_buffers[entry],
5441                                                 mapping),
5442                                  len, PCI_DMA_TODEVICE);
5443                 if (i == 0) {
5444                         tnapi->tx_buffers[entry].skb = new_skb;
5445                         pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5446                                            new_addr);
5447                 } else {
5448                         tnapi->tx_buffers[entry].skb = NULL;
5449                 }
5450                 entry = NEXT_TX(entry);
5451                 i++;
5452         }
5453
5454         dev_kfree_skb(skb);
5455
5456         return ret;
5457 }
5458
5459 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5460                         dma_addr_t mapping, int len, u32 flags,
5461                         u32 mss_and_is_end)
5462 {
5463         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5464         int is_end = (mss_and_is_end & 0x1);
5465         u32 mss = (mss_and_is_end >> 1);
5466         u32 vlan_tag = 0;
5467
5468         if (is_end)
5469                 flags |= TXD_FLAG_END;
5470         if (flags & TXD_FLAG_VLAN) {
5471                 vlan_tag = flags >> 16;
5472                 flags &= 0xffff;
5473         }
5474         vlan_tag |= (mss << TXD_MSS_SHIFT);
5475
5476         txd->addr_hi = ((u64) mapping >> 32);
5477         txd->addr_lo = ((u64) mapping & 0xffffffff);
5478         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5479         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5480 }
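
/* Editor's sketch (illustrative only, not part of the driver): callers
 * pack the MSS and the end-of-packet marker into a single u32, bit 0
 * being is_end and the remaining bits the MSS, as the decode above
 * shows.  The matching encode, with a hypothetical ex_ name:
 */
static inline u32 ex_mss_and_is_end(u32 mss, int is_end)
{
        return (mss << 1) | (is_end ? 1 : 0);   /* bit 0 = last BD */
}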
5481
5482 /* hard_start_xmit for devices that don't have any bugs and
5483  * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5484  */
5485 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5486                                   struct net_device *dev)
5487 {
5488         struct tg3 *tp = netdev_priv(dev);
5489         u32 len, entry, base_flags, mss;
5490         dma_addr_t mapping;
5491         struct tg3_napi *tnapi;
5492         struct netdev_queue *txq;
5493         unsigned int i, last;
5494
5496         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5497         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5498         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5499                 tnapi++;
5500
        /* We run in a BH-disabled context holding netif_tx_lock, while
         * TX reclaim runs via tp->napi.poll inside a software
         * interrupt.  Furthermore, IRQ processing runs lockless, so we
         * have no IRQ context deadlocks to worry about either.
         * Rejoice!
         */
        if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;
                u32 hdrlen;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdrlen = skb_headlen(skb) - ETH_HLEN;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        hdrlen = ip_tcp_len + tcp_opt_len;
                }

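                /* Hand the chip the full TSO header length.  HW_TSO_3
                 * parts want it scattered across spare bits of the MSS
                 * field and the base flags; older parts take it in the
                 * upper bits of the MSS word.
                 */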
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
                        mss |= (hdrlen & 0xc) << 12;
                        if (hdrlen & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdrlen & 0x3e0) << 5;
                } else
                        mss |= hdrlen << 9;

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif
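        /* The VLAN tag rides in the upper 16 bits of base_flags;
         * tg3_set_txd() splits it back out whenever TXD_FLAG_VLAN is
         * set.
         */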

        len = skb_headlen(skb);

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        tnapi->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

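        /* Non-TSO frames above the standard Ethernet payload size need
         * the jumbo descriptor flag on chips that use it.
         */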
        if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
            !mss && skb->len > ETH_DATA_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);
                        if (pci_dma_mapping_error(tp->pdev, mapping))
                                goto dma_error;

                        tnapi->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);

                        tg3_set_txd(tnapi, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready; update the Tx producer index locally and
         * on the card.
         */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
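        /* Stop the queue once a maximally fragmented skb might no
         * longer fit, then re-check: if the reclaim path freed enough
         * descriptors in the meantime, wake the queue right back up so
         * the wakeup is not lost.
         */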
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

out_unlock:
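        /* Keep the producer mailbox write ordered with respect to the
         * upcoming release of the tx lock on architectures that need
         * mmiowb() for that.
         */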
        mmiowb();

        return NETDEV_TX_OK;

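/* A fragment failed to map: unwind every DMA mapping created so far
 * (the linear area plus the frags before the one that failed), drop
 * the packet, and return NETDEV_TX_OK, since retrying the same mapping
 * failure from the stack would not help.
 */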
dma_error:
        last = i;
        entry = tnapi->tx_prod;
        tnapi->tx_buffers[entry].skb = NULL;
        pci_unmap_single(tp->pdev,
                         pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
        for (i = 0; i < last; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                entry = NEXT_TX(entry);

                pci_unmap_page(tp->pdev,
                               pci_unmap_addr(&tnapi->tx_buffers[entry],
                                              mapping),
                               frag->size, PCI_DMA_TODEVICE);
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
                                          struct net_device *);

/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of descriptors needed in the worst case
         * (roughly three per resulting segment).  If the ring cannot
         * hold that many, stop the queue and re-check; reclaim may have
         * freed enough space to continue, otherwise report BUSY.
         */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

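        /* Unlink each segment skb_gso_segment() built and push it
         * through the slow transmit path with TSO disabled.
         */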
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}

/* hard_start_xmit for devices that have the 4GB-boundary DMA bug and/or
 * the 40-bit DMA address bug, and that support only TG3_FLG2_HW_TSO_1
 * or firmware TSO.
 */
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                                          struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int i, last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
                tnapi++;

        /* We run in a BH-disabled context holding netif_tx_lock, while
         * TX reclaim runs via tp->napi.poll inside a software
         * interrupt.  Furthermore, IRQ processing runs lockless, so we
         * have no IRQ context deadlocks to worry about either.
         * Rejoice!
         */
        if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                u32 tcp_opt_len, ip_tcp_len, hdr_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                hdr_len = ip_tcp_len + tcp_opt_len;
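                /* Chips with the TSO bug mishandle TSO headers longer
                 * than 80 bytes (Ethernet header included); punt such
                 * frames to the GSO-based fallback above.
                 */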
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
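                /* HW TSO engines recompute the TCP checksum from a
                 * zeroed field, so drop the checksum-offload flag; for
                 * firmware TSO the field must instead be seeded with
                 * the pseudo-header checksum.
                 */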
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
