net: convert print_mac to %pM
[linux-2.6.git] drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.94"
#define DRV_MODULE_RELDATE      "August 14, 2008"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
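/* Advance a TX ring index with wraparound; this works because
 * TG3_TX_RING_SIZE is a power of two.
 */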
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

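/* RX buffer sizes: a standard (1536 byte) or jumbo (9046 byte) frame
 * plus the adapter's receive offset and 64 bytes of trailing slack.
 */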
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

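/* Indirect register access: the target offset is written into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space, and the data then
 * moves through TG3PCI_REG_DATA.  indirect_lock serializes users of
 * the shared window.
 */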
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

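/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice, and hosts that may reorder writes need a read
 * back to flush the posted write.
 */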
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)

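/* Access NIC SRAM through the memory window at TG3PCI_MEM_WIN_BASE_ADDR,
 * via either PCI config space or MMIO depending on SRAM_USE_CONFIG.
 * The 5906 statistics block range is not accessible and is skipped.
 */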
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

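/* Acquire one of the hardware semaphores shared with the APE
 * management firmware, polling the GRANT register for up to 1 ms.
 */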
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

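/* MII management (MDIO) access goes through the MAC's MI_COM register.
 * Hardware auto-polling is paused around each transaction so the poller
 * cannot corrupt the frame, and is restored afterwards.
 */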
#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = (struct tg3 *)bp->priv;
        u32 val;

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
                return -EAGAIN;

        if (tg3_readphy(tp, reg, &val))
                return -EIO;

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = (struct tg3 *)bp->priv;

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
                return -EAGAIN;

        if (tg3_writephy(tp, reg, val))
                return -EIO;

        return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config(struct tg3 *tp)
{
        u32 val;

        if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
            PHY_INTERFACE_MODE_RGMII)
                return;

        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_INBAND_ENABLE;
        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }

        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
                tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus->mdio_lock);
                tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus->mdio_lock);
        }
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                return i;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        switch (phydev->phy_id) {
        case TG3_PHY_ID_BCM50610:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                break;
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                break;
        }

        tg3_mdio_config(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}

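/* Translate the driver's RX/TX flow control settings into the 802.3
 * PAUSE / ASYM_PAUSE advertisement bits (and their 1000BASE-X
 * equivalents below).
 */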
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

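/* Resolve the negotiated flow control configuration from the local and
 * link partner advertisements, following the standard pause resolution
 * truth table.
 */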
static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_PAUSE_CAP) {
                if (lcladv & ADVERTISE_PAUSE_ASYM) {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_PAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
                if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        spin_lock(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        /* Mask with MAC supported features. */
        phydev->supported &= (PHY_GBIT_FEATURES |
                              SUPPORTED_Pause |
                              SUPPORTED_Asym_Pause);

        phydev->advertising = phydev->supported;

        printk(KERN_INFO
               "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
               tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

        return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

        if (tp->link_config.phy_is_low_power) {
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
                phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
                tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
        }
}

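/* The PHY's DSP registers are reached indirectly: write the register
 * number to the DSP address port, then the value through the
 * read/write port.
 */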
1403 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1404 {
1405         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1406         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1407 }
1408
1409 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1410 {
1411         u32 phy;
1412
1413         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1414             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1415                 return;
1416
1417         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1418                 u32 ephy;
1419
1420                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1421                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1422                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1423                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1424                                 if (enable)
1425                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1426                                 else
1427                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1428                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1429                         }
1430                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1431                 }
1432         } else {
1433                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1434                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1435                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1436                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1437                         if (enable)
1438                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1439                         else
1440                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1441                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1442                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1443                 }
1444         }
1445 }
1446
1447 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1448 {
1449         u32 val;
1450
1451         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1452                 return;
1453
1454         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1455             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1456                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1457                              (val | (1 << 15) | (1 << 4)));
1458 }
1459
1460 static void tg3_phy_apply_otp(struct tg3 *tp)
1461 {
1462         u32 otp, phy;
1463
1464         if (!tp->phy_otp)
1465                 return;
1466
1467         otp = tp->phy_otp;
1468
1469         /* Enable SM_DSP clock and tx 6dB coding. */
1470         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1471               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1472               MII_TG3_AUXCTL_ACTL_TX_6DB;
1473         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1474
1475         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1476         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1477         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1478
1479         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1480               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1481         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1482
1483         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1484         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1485         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1486
1487         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1488         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1489
1490         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1491         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1492
1493         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1494               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1495         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1496
1497         /* Turn off SM_DSP clock. */
1498         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1499               MII_TG3_AUXCTL_ACTL_TX_6DB;
1500         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1501 }
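
/* Every OTP field above is recovered with the same mask-and-shift
 * idiom; factored out it is simply (hypothetical helper, shown only to
 * make the unpacking explicit):
 */
#if 0
static inline u32 tg3_otp_field(u32 otp, u32 mask, int shift)
{
	return (otp & mask) >> shift;
}
#endif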
1502
1503 static int tg3_wait_macro_done(struct tg3 *tp)
1504 {
1505         int limit = 100;
1506
1507         while (limit--) {
1508                 u32 tmp32;
1509
1510                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1511                         if ((tmp32 & 0x1000) == 0)
1512                                 break;
1513                 }
1514         }
1515         if (limit < 0)  /* -1 only if the poll timed out */
1516                 return -EBUSY;
1517
1518         return 0;
1519 }
1520
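/* Write a known test pattern into the DSP of each of the four channels,
 * run the macro, and read the pattern back.  A macro timeout or a
 * readback mismatch sets *resetp so the caller can reset the PHY and
 * try again.
 */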
1521 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1522 {
1523         static const u32 test_pat[4][6] = {
1524         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1525         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1526         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1527         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1528         };
1529         int chan;
1530
1531         for (chan = 0; chan < 4; chan++) {
1532                 int i;
1533
1534                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1535                              (chan * 0x2000) | 0x0200);
1536                 tg3_writephy(tp, 0x16, 0x0002);
1537
1538                 for (i = 0; i < 6; i++)
1539                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1540                                      test_pat[chan][i]);
1541
1542                 tg3_writephy(tp, 0x16, 0x0202);
1543                 if (tg3_wait_macro_done(tp)) {
1544                         *resetp = 1;
1545                         return -EBUSY;
1546                 }
1547
1548                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1549                              (chan * 0x2000) | 0x0200);
1550                 tg3_writephy(tp, 0x16, 0x0082);
1551                 if (tg3_wait_macro_done(tp)) {
1552                         *resetp = 1;
1553                         return -EBUSY;
1554                 }
1555
1556                 tg3_writephy(tp, 0x16, 0x0802);
1557                 if (tg3_wait_macro_done(tp)) {
1558                         *resetp = 1;
1559                         return -EBUSY;
1560                 }
1561
1562                 for (i = 0; i < 6; i += 2) {
1563                         u32 low, high;
1564
1565                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1566                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1567                             tg3_wait_macro_done(tp)) {
1568                                 *resetp = 1;
1569                                 return -EBUSY;
1570                         }
1571                         low &= 0x7fff;
1572                         high &= 0x000f;
1573                         if (low != test_pat[chan][i] ||
1574                             high != test_pat[chan][i+1]) {
1575                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1576                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1577                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1578
1579                                 return -EBUSY;
1580                         }
1581                 }
1582         }
1583
1584         return 0;
1585 }
1586
1587 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1588 {
1589         int chan;
1590
1591         for (chan = 0; chan < 4; chan++) {
1592                 int i;
1593
1594                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1595                              (chan * 0x2000) | 0x0200);
1596                 tg3_writephy(tp, 0x16, 0x0002);
1597                 for (i = 0; i < 6; i++)
1598                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1599                 tg3_writephy(tp, 0x16, 0x0202);
1600                 if (tg3_wait_macro_done(tp))
1601                         return -EBUSY;
1602         }
1603
1604         return 0;
1605 }
1606
1607 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1608 {
1609         u32 reg32, phy9_orig;
1610         int retries, do_phy_reset, err;
1611
1612         retries = 10;
1613         do_phy_reset = 1;
1614         do {
1615                 if (do_phy_reset) {
1616                         err = tg3_bmcr_reset(tp);
1617                         if (err)
1618                                 return err;
1619                         do_phy_reset = 0;
1620                 }
1621
1622                 /* Disable transmitter and interrupt.  */
1623                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1624                         continue;
1625
1626                 reg32 |= 0x3000;
1627                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1628
1629                 /* Set full-duplex, 1000 mbps.  */
1630                 tg3_writephy(tp, MII_BMCR,
1631                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1632
1633                 /* Set to master mode.  */
1634                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1635                         continue;
1636
1637                 tg3_writephy(tp, MII_TG3_CTRL,
1638                              (MII_TG3_CTRL_AS_MASTER |
1639                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1640
1641                 /* Enable SM_DSP_CLOCK and 6dB.  */
1642                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1643
1644                 /* Block the PHY control access.  */
1645                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1646                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1647
1648                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1649                 if (!err)
1650                         break;
1651         } while (--retries);
1652
1653         err = tg3_phy_reset_chanpat(tp);
1654         if (err)
1655                 return err;
1656
1657         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1658         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1659
1660         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1661         tg3_writephy(tp, 0x16, 0x0000);
1662
1663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1665                 /* Set Extended packet length bit for jumbo frames */
1666                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1667         } else {
1668                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1669         }
1671
1672         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1673
1674         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1675                 reg32 &= ~0x3000;
1676                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1677         } else if (!err)
1678                 err = -EBUSY;
1679
1680         return err;
1681 }
1682
1683 /* Reset the tigon3 PHY unconditionally.  The "only reset when there
1684  * is no valid link" policy belongs to the callers; see the force_reset
1685  * handling in tg3_setup_copper_phy(). */
1686 static int tg3_phy_reset(struct tg3 *tp)
1687 {
1688         u32 cpmuctrl;
1689         u32 phy_status;
1690         int err;
1691
1692         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693                 u32 val;
1694
1695                 val = tr32(GRC_MISC_CFG);
1696                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1697                 udelay(40);
1698         }
1699         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1700         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1701         if (err != 0)
1702                 return -EBUSY;
1703
1704         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1705                 netif_carrier_off(tp->dev);
1706                 tg3_link_report(tp);
1707         }
1708
1709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1711             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1712                 err = tg3_phy_reset_5703_4_5(tp);
1713                 if (err)
1714                         return err;
1715                 goto out;
1716         }
1717
1718         cpmuctrl = 0;
1719         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1720             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1721                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1722                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1723                         tw32(TG3_CPMU_CTRL,
1724                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1725         }
1726
1727         err = tg3_bmcr_reset(tp);
1728         if (err)
1729                 return err;
1730
1731         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1732                 u32 phy;
1733
1734                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1735                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1736
1737                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1738         }
1739
1740         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1741                 u32 val;
1742
1743                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1744                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1745                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1746                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1747                         udelay(40);
1748                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1749                 }
1750
1751                 /* Disable GPHY autopowerdown. */
1752                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1753                              MII_TG3_MISC_SHDW_WREN |
1754                              MII_TG3_MISC_SHDW_APD_SEL |
1755                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1756         }
1757
1758         tg3_phy_apply_otp(tp);
1759
1760 out:
1761         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1762                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1763                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1764                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1765                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1766                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1767                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1768         }
1769         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1770                 tg3_writephy(tp, 0x1c, 0x8d68);
1771                 tg3_writephy(tp, 0x1c, 0x8d68);
1772         }
1773         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1774                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1775                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1776                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1777                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1778                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1779                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1780                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1781                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1782         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1784                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1785                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1786                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1787                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1788                         tg3_writephy(tp, MII_TG3_TEST1,
1789                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1790                 } else
1791                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1792                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1793         }
1794         /* Set Extended packet length bit (bit 14) on all chips
1795          * that support jumbo frames. */
1796         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1797                 /* Cannot do read-modify-write on 5401 */
1798                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1799         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1800                 u32 phy_reg;
1801
1802                 /* Set bit 14 with read-modify-write to preserve other bits */
1803                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1804                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1805                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1806         }
1807
1808         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1809          * jumbo frames transmission.
1810          */
1811         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1812                 u32 phy_reg;
1813
1814                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1815                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1816                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1817         }
1818
1819         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1820                 /* adjust output voltage */
1821                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1822         }
1823
1824         tg3_phy_toggle_automdix(tp, 1);
1825         tg3_phy_set_wirespeed(tp);
1826         return 0;
1827 }
1828
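/* Sequence the GPIO-driven auxiliary (Vaux) power switches.  On the
 * dual-port 5704/5714 the switches are shared by both functions, so
 * the peer's WOL/ASF state is consulted before anything is switched
 * off.
 */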
1829 static void tg3_frob_aux_power(struct tg3 *tp)
1830 {
1831         struct tg3 *tp_peer = tp;
1832
1833         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1834                 return;
1835
1836         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1837             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1838                 struct net_device *dev_peer;
1839
1840                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1841                 /* remove_one() may have been run on the peer. */
1842                 if (!dev_peer)
1843                         tp_peer = tp;
1844                 else
1845                         tp_peer = netdev_priv(dev_peer);
1846         }
1847
1848         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1849             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1850             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1851             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1853                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1854                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1855                                     (GRC_LCLCTRL_GPIO_OE0 |
1856                                      GRC_LCLCTRL_GPIO_OE1 |
1857                                      GRC_LCLCTRL_GPIO_OE2 |
1858                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1859                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1860                                     100);
1861                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1862                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1863                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1864                                              GRC_LCLCTRL_GPIO_OE1 |
1865                                              GRC_LCLCTRL_GPIO_OE2 |
1866                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1867                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1868                                              tp->grc_local_ctrl;
1869                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1870
1871                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1872                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1873
1874                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1875                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1876                 } else {
1877                         u32 no_gpio2;
1878                         u32 grc_local_ctrl = 0;
1879
1880                         if (tp_peer != tp &&
1881                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1882                                 return;
1883
1884                         /* Workaround to prevent overdrawing Amps. */
1885                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1886                             ASIC_REV_5714) {
1887                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1888                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1889                                             grc_local_ctrl, 100);
1890                         }
1891
1892                         /* On 5753 and variants, GPIO2 cannot be used. */
1893                         no_gpio2 = tp->nic_sram_data_cfg &
1894                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1895
1896                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1897                                          GRC_LCLCTRL_GPIO_OE1 |
1898                                          GRC_LCLCTRL_GPIO_OE2 |
1899                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1900                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1901                         if (no_gpio2) {
1902                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1903                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1904                         }
1905                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1906                                                     grc_local_ctrl, 100);
1907
1908                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1909
1910                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1911                                                     grc_local_ctrl, 100);
1912
1913                         if (!no_gpio2) {
1914                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1915                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1916                                             grc_local_ctrl, 100);
1917                         }
1918                 }
1919         } else {
1920                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1921                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1922                         if (tp_peer != tp &&
1923                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1924                                 return;
1925
1926                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1927                                     (GRC_LCLCTRL_GPIO_OE1 |
1928                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1929
1930                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1931                                     GRC_LCLCTRL_GPIO_OE1, 100);
1932
1933                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1934                                     (GRC_LCLCTRL_GPIO_OE1 |
1935                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1936                 }
1937         }
1938 }
1939
1940 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1941 {
1942         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1943                 return 1;
1944         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1945                 if (speed != SPEED_10)
1946                         return 1;
1947         } else if (speed == SPEED_10)
1948                 return 1;
1949
1950         return 0;
1951 }
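
/* Put another way: LED_CTRL_MODE_PHY_2 always inverts the link
 * polarity; otherwise a BCM5411 inverts at 100/1000 Mbps while every
 * other PHY inverts only at 10 Mbps.
 */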
1952
1953 static int tg3_setup_phy(struct tg3 *, int);
1954
1955 #define RESET_KIND_SHUTDOWN     0
1956 #define RESET_KIND_INIT         1
1957 #define RESET_KIND_SUSPEND      2
1958
1959 static void tg3_write_sig_post_reset(struct tg3 *, int);
1960 static int tg3_halt_cpu(struct tg3 *, u32);
1961 static int tg3_nvram_lock(struct tg3 *);
1962 static void tg3_nvram_unlock(struct tg3 *);
1963
1964 static void tg3_power_down_phy(struct tg3 *tp)
1965 {
1966         u32 val;
1967
1968         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1969                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1970                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1971                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1972
1973                         sg_dig_ctrl |=
1974                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1975                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1976                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1977                 }
1978                 return;
1979         }
1980
1981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1982                 tg3_bmcr_reset(tp);
1983                 val = tr32(GRC_MISC_CFG);
1984                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1985                 udelay(40);
1986                 return;
1987         } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1988                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1989                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1990                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1991         }
1992
1993         /* The PHY should not be powered down on some chips because
1994          * of bugs.
1995          */
1996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1998             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1999              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2000                 return;
2001
2002         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
2003                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2004                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2005                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2006                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2007         }
2008
2009         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2010 }
2011
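/* Move the chip to the requested PCI power state.  D0 simply wakes the
 * device and switches it out of Vaux.  For D1-D3hot the PCI interrupt
 * is masked, the link is renegotiated down to a WOL-capable speed, the
 * MAC is armed for magic packets when WOL is enabled, core clocks are
 * gated where the chip allows it, and the PHY and aux power are shut
 * down before the state change is committed.
 */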
2012 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2013 {
2014         u32 misc_host_ctrl;
2015
2016         /* Make sure register accesses (indirect or otherwise)
2017          * will function correctly.
2018          */
2019         pci_write_config_dword(tp->pdev,
2020                                TG3PCI_MISC_HOST_CTRL,
2021                                tp->misc_host_ctrl);
2022
2023         switch (state) {
2024         case PCI_D0:
2025                 pci_enable_wake(tp->pdev, state, false);
2026                 pci_set_power_state(tp->pdev, PCI_D0);
2027
2028                 /* Switch out of Vaux if it is a NIC */
2029                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2030                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2031
2032                 return 0;
2033
2034         case PCI_D1:
2035         case PCI_D2:
2036         case PCI_D3hot:
2037                 break;
2038
2039         default:
2040                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2041                         tp->dev->name, state);
2042                 return -EINVAL;
2043         }
2044         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2045         tw32(TG3PCI_MISC_HOST_CTRL,
2046              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2047
2048         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2049                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2050                     !tp->link_config.phy_is_low_power) {
2051                         struct phy_device *phydev;
2052                         u32 advertising;
2053
2054                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2055
2056                         tp->link_config.phy_is_low_power = 1;
2057
2058                         tp->link_config.orig_speed = phydev->speed;
2059                         tp->link_config.orig_duplex = phydev->duplex;
2060                         tp->link_config.orig_autoneg = phydev->autoneg;
2061                         tp->link_config.orig_advertising = phydev->advertising;
2062
2063                         advertising = ADVERTISED_TP |
2064                                       ADVERTISED_Pause |
2065                                       ADVERTISED_Autoneg |
2066                                       ADVERTISED_10baseT_Half;
2067
2068                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2069                             (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2070                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2071                                         advertising |=
2072                                                 ADVERTISED_100baseT_Half |
2073                                                 ADVERTISED_100baseT_Full |
2074                                                 ADVERTISED_10baseT_Full;
2075                                 else
2076                                         advertising |= ADVERTISED_10baseT_Full;
2077                         }
2078
2079                         phydev->advertising = advertising;
2080
2081                         phy_start_aneg(phydev);
2082                 }
2083         } else {
2084                 if (tp->link_config.phy_is_low_power == 0) {
2085                         tp->link_config.phy_is_low_power = 1;
2086                         tp->link_config.orig_speed = tp->link_config.speed;
2087                         tp->link_config.orig_duplex = tp->link_config.duplex;
2088                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2089                 }
2090
2091                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2092                         tp->link_config.speed = SPEED_10;
2093                         tp->link_config.duplex = DUPLEX_HALF;
2094                         tp->link_config.autoneg = AUTONEG_ENABLE;
2095                         tg3_setup_phy(tp, 0);
2096                 }
2097         }
2098
2099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2100                 u32 val;
2101
2102                 val = tr32(GRC_VCPU_EXT_CTRL);
2103                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2104         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2105                 int i;
2106                 u32 val;
2107
2108                 for (i = 0; i < 200; i++) {
2109                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2110                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2111                                 break;
2112                         msleep(1);
2113                 }
2114         }
2115         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2116                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2117                                                      WOL_DRV_STATE_SHUTDOWN |
2118                                                      WOL_DRV_WOL |
2119                                                      WOL_SET_MAGIC_PKT);
2120
2121         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2122                 u32 mac_mode;
2123
2124                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2125                         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2126                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2127                                 udelay(40);
2128                         }
2129
2130                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2131                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2132                         else
2133                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2134
2135                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2136                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2137                             ASIC_REV_5700) {
2138                                 u32 speed = (tp->tg3_flags &
2139                                              TG3_FLAG_WOL_SPEED_100MB) ?
2140                                              SPEED_100 : SPEED_10;
2141                                 if (tg3_5700_link_polarity(tp, speed))
2142                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2143                                 else
2144                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2145                         }
2146                 } else {
2147                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2148                 }
2149
2150                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2151                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2152
2153                 if (pci_pme_capable(tp->pdev, state) &&
2154                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2155                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2156
2157                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2158                         mac_mode |= tp->mac_mode &
2159                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2160                         if (mac_mode & MAC_MODE_APE_TX_EN)
2161                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2162                 }
2163
2164                 tw32_f(MAC_MODE, mac_mode);
2165                 udelay(100);
2166
2167                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2168                 udelay(10);
2169         }
2170
2171         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2172             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2173              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2174                 u32 base_val;
2175
2176                 base_val = tp->pci_clock_ctrl;
2177                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2178                              CLOCK_CTRL_TXCLK_DISABLE);
2179
2180                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2181                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2182         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2183                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2184                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2185                 /* do nothing */
2186         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2187                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2188                 u32 newbits1, newbits2;
2189
2190                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2191                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2192                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2193                                     CLOCK_CTRL_TXCLK_DISABLE |
2194                                     CLOCK_CTRL_ALTCLK);
2195                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2196                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2197                         newbits1 = CLOCK_CTRL_625_CORE;
2198                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2199                 } else {
2200                         newbits1 = CLOCK_CTRL_ALTCLK;
2201                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2202                 }
2203
2204                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2205                             40);
2206
2207                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2208                             40);
2209
2210                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2211                         u32 newbits3;
2212
2213                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2214                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2215                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2216                                             CLOCK_CTRL_TXCLK_DISABLE |
2217                                             CLOCK_CTRL_44MHZ_CORE);
2218                         } else {
2219                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2220                         }
2221
2222                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2223                                     tp->pci_clock_ctrl | newbits3, 40);
2224                 }
2225         }
2226
2227         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2228             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2229             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2230                 tg3_power_down_phy(tp);
2231
2232         tg3_frob_aux_power(tp);
2233
2234         /* Workaround for unstable PLL clock */
2235         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2236             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2237                 u32 val = tr32(0x7d00);
2238
2239                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2240                 tw32(0x7d00, val);
2241                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2242                         int err;
2243
2244                         err = tg3_nvram_lock(tp);
2245                         tg3_halt_cpu(tp, RX_CPU_BASE);
2246                         if (!err)
2247                                 tg3_nvram_unlock(tp);
2248                 }
2249         }
2250
2251         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2252
2253         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2254                 pci_enable_wake(tp->pdev, state, true);
2255
2256         /* Finally, set the new power state. */
2257         pci_set_power_state(tp->pdev, state);
2258
2259         return 0;
2260 }
2261
2262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2263 {
2264         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2265         case MII_TG3_AUX_STAT_10HALF:
2266                 *speed = SPEED_10;
2267                 *duplex = DUPLEX_HALF;
2268                 break;
2269
2270         case MII_TG3_AUX_STAT_10FULL:
2271                 *speed = SPEED_10;
2272                 *duplex = DUPLEX_FULL;
2273                 break;
2274
2275         case MII_TG3_AUX_STAT_100HALF:
2276                 *speed = SPEED_100;
2277                 *duplex = DUPLEX_HALF;
2278                 break;
2279
2280         case MII_TG3_AUX_STAT_100FULL:
2281                 *speed = SPEED_100;
2282                 *duplex = DUPLEX_FULL;
2283                 break;
2284
2285         case MII_TG3_AUX_STAT_1000HALF:
2286                 *speed = SPEED_1000;
2287                 *duplex = DUPLEX_HALF;
2288                 break;
2289
2290         case MII_TG3_AUX_STAT_1000FULL:
2291                 *speed = SPEED_1000;
2292                 *duplex = DUPLEX_FULL;
2293                 break;
2294
2295         default:
2296                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2297                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2298                                  SPEED_10;
2299                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2300                                   DUPLEX_HALF;
2301                         break;
2302                 }
2303                 *speed = SPEED_INVALID;
2304                 *duplex = DUPLEX_INVALID;
2305                 break;
2306         }
2307 }
2308
2309 static void tg3_phy_copper_begin(struct tg3 *tp)
2310 {
2311         u32 new_adv;
2312         int i;
2313
2314         if (tp->link_config.phy_is_low_power) {
2315                 /* Entering low power mode.  Disable gigabit and
2316                  * 100baseT advertisements.
2317                  */
2318                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2319
2320                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2321                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2322                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2323                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2324
2325                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2326         } else if (tp->link_config.speed == SPEED_INVALID) {
2327                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2328                         tp->link_config.advertising &=
2329                                 ~(ADVERTISED_1000baseT_Half |
2330                                   ADVERTISED_1000baseT_Full);
2331
2332                 new_adv = ADVERTISE_CSMA;
2333                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2334                         new_adv |= ADVERTISE_10HALF;
2335                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2336                         new_adv |= ADVERTISE_10FULL;
2337                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2338                         new_adv |= ADVERTISE_100HALF;
2339                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2340                         new_adv |= ADVERTISE_100FULL;
2341
2342                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2343
2344                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2345
2346                 if (tp->link_config.advertising &
2347                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2348                         new_adv = 0;
2349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2350                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2351                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2352                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2353                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2354                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2355                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2356                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2357                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2358                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2359                 } else {
2360                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2361                 }
2362         } else {
2363                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2364                 new_adv |= ADVERTISE_CSMA;
2365
2366                 /* Asking for a specific link mode. */
2367                 if (tp->link_config.speed == SPEED_1000) {
2368                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2369
2370                         if (tp->link_config.duplex == DUPLEX_FULL)
2371                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2372                         else
2373                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2374                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2375                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2376                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2377                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2378                 } else {
2379                         if (tp->link_config.speed == SPEED_100) {
2380                                 if (tp->link_config.duplex == DUPLEX_FULL)
2381                                         new_adv |= ADVERTISE_100FULL;
2382                                 else
2383                                         new_adv |= ADVERTISE_100HALF;
2384                         } else {
2385                                 if (tp->link_config.duplex == DUPLEX_FULL)
2386                                         new_adv |= ADVERTISE_10FULL;
2387                                 else
2388                                         new_adv |= ADVERTISE_10HALF;
2389                         }
2390                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2391
2392                         new_adv = 0;
2393                 }
2394
2395                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2396         }
2397
2398         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2399             tp->link_config.speed != SPEED_INVALID) {
2400                 u32 bmcr, orig_bmcr;
2401
2402                 tp->link_config.active_speed = tp->link_config.speed;
2403                 tp->link_config.active_duplex = tp->link_config.duplex;
2404
2405                 bmcr = 0;
2406                 switch (tp->link_config.speed) {
2407                 default:
2408                 case SPEED_10:
2409                         break;
2410
2411                 case SPEED_100:
2412                         bmcr |= BMCR_SPEED100;
2413                         break;
2414
2415                 case SPEED_1000:
2416                         bmcr |= TG3_BMCR_SPEED1000;
2417                         break;
2418                 }
2419
2420                 if (tp->link_config.duplex == DUPLEX_FULL)
2421                         bmcr |= BMCR_FULLDPLX;
2422
2423                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2424                     (bmcr != orig_bmcr)) {
2425                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2426                         for (i = 0; i < 1500; i++) {
2427                                 u32 tmp;
2428
2429                                 udelay(10);
2430                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2431                                     tg3_readphy(tp, MII_BMSR, &tmp))
2432                                         continue;
2433                                 if (!(tmp & BMSR_LSTATUS)) {
2434                                         udelay(40);
2435                                         break;
2436                                 }
2437                         }
2438                         tg3_writephy(tp, MII_BMCR, bmcr);
2439                         udelay(40);
2440                 }
2441         } else {
2442                 tg3_writephy(tp, MII_BMCR,
2443                              BMCR_ANENABLE | BMCR_ANRESTART);
2444         }
2445 }
2446
2447 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2448 {
2449         int err;
2450
2451         /* Turn off tap power management. */
2452         /* Set Extended packet length bit */
2453         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2454
2455         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2456         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2457
2458         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2459         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2460
2461         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2462         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2463
2464         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2465         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2466
2467         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2468         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2469
2470         udelay(40);
2471
2472         return err;
2473 }
2474
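/* Return 1 if the PHY's advertisement registers currently cover every
 * mode requested in @mask, 0 if anything is missing or a register read
 * fails.
 */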
2475 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2476 {
2477         u32 adv_reg, all_mask = 0;
2478
2479         if (mask & ADVERTISED_10baseT_Half)
2480                 all_mask |= ADVERTISE_10HALF;
2481         if (mask & ADVERTISED_10baseT_Full)
2482                 all_mask |= ADVERTISE_10FULL;
2483         if (mask & ADVERTISED_100baseT_Half)
2484                 all_mask |= ADVERTISE_100HALF;
2485         if (mask & ADVERTISED_100baseT_Full)
2486                 all_mask |= ADVERTISE_100FULL;
2487
2488         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2489                 return 0;
2490
2491         if ((adv_reg & all_mask) != all_mask)
2492                 return 0;
2493         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2494                 u32 tg3_ctrl;
2495
2496                 all_mask = 0;
2497                 if (mask & ADVERTISED_1000baseT_Half)
2498                         all_mask |= ADVERTISE_1000HALF;
2499                 if (mask & ADVERTISED_1000baseT_Full)
2500                         all_mask |= ADVERTISE_1000FULL;
2501
2502                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2503                         return 0;
2504
2505                 if ((tg3_ctrl & all_mask) != all_mask)
2506                         return 0;
2507         }
2508         return 1;
2509 }
2510
2511 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2512 {
2513         u32 curadv, reqadv;
2514
2515         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2516                 return 1;
2517
2518         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2519         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2520
2521         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2522                 if (curadv != reqadv)
2523                         return 0;
2524
2525                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2526                         tg3_readphy(tp, MII_LPA, rmtadv);
2527         } else {
2528                 /* Reprogram the advertisement register, even if it
2529                  * does not affect the current link.  If the link
2530                  * gets renegotiated in the future, we can save an
2531                  * additional renegotiation cycle by advertising
2532                  * it correctly in the first place.
2533                  */
2534                 if (curadv != reqadv) {
2535                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2536                                      ADVERTISE_PAUSE_ASYM);
2537                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2538                 }
2539         }
2540
2541         return 1;
2542 }
2543
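/* Bring the copper link up (or renegotiate it): ack stale MAC status
 * bits, apply per-chip PHY workarounds, poll BMSR for link, decode the
 * negotiated speed/duplex from AUX_STAT, verify the advertisement and
 * flow-control settings match what was requested, then program MAC_MODE
 * to match the result.
 */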
2544 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2545 {
2546         int current_link_up;
2547         u32 bmsr, dummy;
2548         u32 lcl_adv, rmt_adv;
2549         u16 current_speed;
2550         u8 current_duplex;
2551         int i, err;
2552
2553         tw32(MAC_EVENT, 0);
2554
2555         tw32_f(MAC_STATUS,
2556              (MAC_STATUS_SYNC_CHANGED |
2557               MAC_STATUS_CFG_CHANGED |
2558               MAC_STATUS_MI_COMPLETION |
2559               MAC_STATUS_LNKSTATE_CHANGED));
2560         udelay(40);
2561
2562         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2563                 tw32_f(MAC_MI_MODE,
2564                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2565                 udelay(80);
2566         }
2567
2568         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2569
2570         /* Some third-party PHYs need to be reset on link going
2571          * down.
2572          */
2573         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2574              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2575              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2576             netif_carrier_ok(tp->dev)) {
2577                 tg3_readphy(tp, MII_BMSR, &bmsr);
2578                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2579                     !(bmsr & BMSR_LSTATUS))
2580                         force_reset = 1;
2581         }
2582         if (force_reset)
2583                 tg3_phy_reset(tp);
2584
2585         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2586                 tg3_readphy(tp, MII_BMSR, &bmsr);
2587                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2588                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2589                         bmsr = 0;
2590
2591                 if (!(bmsr & BMSR_LSTATUS)) {
2592                         err = tg3_init_5401phy_dsp(tp);
2593                         if (err)
2594                                 return err;
2595
2596                         tg3_readphy(tp, MII_BMSR, &bmsr);
2597                         for (i = 0; i < 1000; i++) {
2598                                 udelay(10);
2599                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2600                                     (bmsr & BMSR_LSTATUS)) {
2601                                         udelay(40);
2602                                         break;
2603                                 }
2604                         }
2605
2606                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2607                             !(bmsr & BMSR_LSTATUS) &&
2608                             tp->link_config.active_speed == SPEED_1000) {
2609                                 err = tg3_phy_reset(tp);
2610                                 if (!err)
2611                                         err = tg3_init_5401phy_dsp(tp);
2612                                 if (err)
2613                                         return err;
2614                         }
2615                 }
2616         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2617                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2618                 /* 5701 {A0,B0} CRC bug workaround */
2619                 tg3_writephy(tp, 0x15, 0x0a75);
2620                 tg3_writephy(tp, 0x1c, 0x8c68);
2621                 tg3_writephy(tp, 0x1c, 0x8d68);
2622                 tg3_writephy(tp, 0x1c, 0x8c68);
2623         }
2624
2625         /* Clear pending interrupts... */
2626         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2627         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2628
2629         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2630                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2631         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2632                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2633
2634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2636                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2637                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2638                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2639                 else
2640                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2641         }
2642
2643         current_link_up = 0;
2644         current_speed = SPEED_INVALID;
2645         current_duplex = DUPLEX_INVALID;
2646
2647         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2648                 u32 val;
2649
2650                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2651                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2652                 if (!(val & (1 << 10))) {
2653                         val |= (1 << 10);
2654                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2655                         goto relink;
2656                 }
2657         }
2658
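        /* BMSR latches link-down events until it is read, which is why
         * it is read twice back-to-back here and elsewhere in this
         * function; the second read reflects the current link state.
         */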
2659         bmsr = 0;
2660         for (i = 0; i < 100; i++) {
2661                 tg3_readphy(tp, MII_BMSR, &bmsr);
2662                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2663                     (bmsr & BMSR_LSTATUS))
2664                         break;
2665                 udelay(40);
2666         }
2667
2668         if (bmsr & BMSR_LSTATUS) {
2669                 u32 aux_stat, bmcr;
2670
2671                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2672                 for (i = 0; i < 2000; i++) {
2673                         udelay(10);
2674                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2675                             aux_stat)
2676                                 break;
2677                 }
2678
2679                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2680                                              &current_speed,
2681                                              &current_duplex);
2682
2683                 bmcr = 0;
2684                 for (i = 0; i < 200; i++) {
2685                         tg3_readphy(tp, MII_BMCR, &bmcr);
2686                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2687                                 continue;
2688                         if (bmcr && bmcr != 0x7fff)
2689                                 break;
2690                         udelay(10);
2691                 }
2692
2693                 lcl_adv = 0;
2694                 rmt_adv = 0;
2695
2696                 tp->link_config.active_speed = current_speed;
2697                 tp->link_config.active_duplex = current_duplex;
2698
2699                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2700                         if ((bmcr & BMCR_ANENABLE) &&
2701                             tg3_copper_is_advertising_all(tp,
2702                                                 tp->link_config.advertising)) {
2703                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2704                                                                   &rmt_adv))
2705                                         current_link_up = 1;
2706                         }
2707                 } else {
2708                         if (!(bmcr & BMCR_ANENABLE) &&
2709                             tp->link_config.speed == current_speed &&
2710                             tp->link_config.duplex == current_duplex &&
2711                             tp->link_config.flowctrl ==
2712                             tp->link_config.active_flowctrl) {
2713                                 current_link_up = 1;
2714                         }
2715                 }
2716
2717                 if (current_link_up == 1 &&
2718                     tp->link_config.active_duplex == DUPLEX_FULL)
2719                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2720         }
2721
2722 relink:
2723         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2724                 u32 tmp;
2725
2726                 tg3_phy_copper_begin(tp);
2727
2728                 tg3_readphy(tp, MII_BMSR, &tmp);
2729                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2730                     (tmp & BMSR_LSTATUS))
2731                         current_link_up = 1;
2732         }
2733
2734         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2735         if (current_link_up == 1) {
2736                 if (tp->link_config.active_speed == SPEED_100 ||
2737                     tp->link_config.active_speed == SPEED_10)
2738                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2739                 else
2740                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2741         } else
2742                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2743
2744         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2745         if (tp->link_config.active_duplex == DUPLEX_HALF)
2746                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2747
2748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2749                 if (current_link_up == 1 &&
2750                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2751                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2752                 else
2753                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2754         }
2755
2756         /* Without this setting, the Netgear GA302T PHY does not
2757          * send or receive packets (reason unknown).
2758          */
2759         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2760             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2761                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2762                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2763                 udelay(80);
2764         }
2765
2766         tw32_f(MAC_MODE, tp->mac_mode);
2767         udelay(40);
2768
2769         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2770                 /* Polled via timer. */
2771                 tw32_f(MAC_EVENT, 0);
2772         } else {
2773                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2774         }
2775         udelay(40);
2776
2777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2778             current_link_up == 1 &&
2779             tp->link_config.active_speed == SPEED_1000 &&
2780             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2781              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2782                 udelay(120);
2783                 tw32_f(MAC_STATUS,
2784                      (MAC_STATUS_SYNC_CHANGED |
2785                       MAC_STATUS_CFG_CHANGED));
2786                 udelay(40);
2787                 tg3_write_mem(tp,
2788                               NIC_SRAM_FIRMWARE_MBOX,
2789                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2790         }
2791
2792         if (current_link_up != netif_carrier_ok(tp->dev)) {
2793                 if (current_link_up)
2794                         netif_carrier_on(tp->dev);
2795                 else
2796                         netif_carrier_off(tp->dev);
2797                 tg3_link_report(tp);
2798         }
2799
2800         return 0;
2801 }
2802
2803 struct tg3_fiber_aneginfo {
2804         int state;
2805 #define ANEG_STATE_UNKNOWN              0
2806 #define ANEG_STATE_AN_ENABLE            1
2807 #define ANEG_STATE_RESTART_INIT         2
2808 #define ANEG_STATE_RESTART              3
2809 #define ANEG_STATE_DISABLE_LINK_OK      4
2810 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2811 #define ANEG_STATE_ABILITY_DETECT       6
2812 #define ANEG_STATE_ACK_DETECT_INIT      7
2813 #define ANEG_STATE_ACK_DETECT           8
2814 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2815 #define ANEG_STATE_COMPLETE_ACK         10
2816 #define ANEG_STATE_IDLE_DETECT_INIT     11
2817 #define ANEG_STATE_IDLE_DETECT          12
2818 #define ANEG_STATE_LINK_OK              13
2819 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2820 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2821
2822         u32 flags;
2823 #define MR_AN_ENABLE            0x00000001
2824 #define MR_RESTART_AN           0x00000002
2825 #define MR_AN_COMPLETE          0x00000004
2826 #define MR_PAGE_RX              0x00000008
2827 #define MR_NP_LOADED            0x00000010
2828 #define MR_TOGGLE_TX            0x00000020
2829 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2830 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2831 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2832 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2833 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2834 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2835 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2836 #define MR_TOGGLE_RX            0x00002000
2837 #define MR_NP_RX                0x00004000
2838
2839 #define MR_LINK_OK              0x80000000
2840
2841         unsigned long link_time, cur_time;
2842
2843         u32 ability_match_cfg;
2844         int ability_match_count;
2845
2846         char ability_match, idle_match, ack_match;
2847
2848         u32 txconfig, rxconfig;
2849 #define ANEG_CFG_NP             0x00000080
2850 #define ANEG_CFG_ACK            0x00000040
2851 #define ANEG_CFG_RF2            0x00000020
2852 #define ANEG_CFG_RF1            0x00000010
2853 #define ANEG_CFG_PS2            0x00000001
2854 #define ANEG_CFG_PS1            0x00008000
2855 #define ANEG_CFG_HD             0x00004000
2856 #define ANEG_CFG_FD             0x00002000
2857 #define ANEG_CFG_INVAL          0x00001f06
2858
2859 };
2860 #define ANEG_OK         0
2861 #define ANEG_DONE       1
2862 #define ANEG_TIMER_ENAB 2
2863 #define ANEG_FAILED     -1
2864
2865 #define ANEG_STATE_SETTLE_TIME  10000
2866
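/* The state machine below is driven from fiber_autoneg(), which calls
 * it once per udelay(1) tick and increments ap->cur_time on each call,
 * so ANEG_STATE_SETTLE_TIME corresponds to roughly 10 ms of settle
 * time within an overall autoneg budget of about 195 ms.
 */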
2867 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2868                                    struct tg3_fiber_aneginfo *ap)
2869 {
2870         u16 flowctrl;
2871         unsigned long delta;
2872         u32 rx_cfg_reg;
2873         int ret;
2874
2875         if (ap->state == ANEG_STATE_UNKNOWN) {
2876                 ap->rxconfig = 0;
2877                 ap->link_time = 0;
2878                 ap->cur_time = 0;
2879                 ap->ability_match_cfg = 0;
2880                 ap->ability_match_count = 0;
2881                 ap->ability_match = 0;
2882                 ap->idle_match = 0;
2883                 ap->ack_match = 0;
2884         }
2885         ap->cur_time++;
2886
2887         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2888                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2889
2890                 if (rx_cfg_reg != ap->ability_match_cfg) {
2891                         ap->ability_match_cfg = rx_cfg_reg;
2892                         ap->ability_match = 0;
2893                         ap->ability_match_count = 0;
2894                 } else {
2895                         if (++ap->ability_match_count > 1) {
2896                                 ap->ability_match = 1;
2897                                 ap->ability_match_cfg = rx_cfg_reg;
2898                         }
2899                 }
2900                 if (rx_cfg_reg & ANEG_CFG_ACK)
2901                         ap->ack_match = 1;
2902                 else
2903                         ap->ack_match = 0;
2904
2905                 ap->idle_match = 0;
2906         } else {
2907                 ap->idle_match = 1;
2908                 ap->ability_match_cfg = 0;
2909                 ap->ability_match_count = 0;
2910                 ap->ability_match = 0;
2911                 ap->ack_match = 0;
2912
2913                 rx_cfg_reg = 0;
2914         }
2915
2916         ap->rxconfig = rx_cfg_reg;
2917         ret = ANEG_OK;
2918
2919         switch (ap->state) {
2920         case ANEG_STATE_UNKNOWN:
2921                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2922                         ap->state = ANEG_STATE_AN_ENABLE;
2923
2924                 /* fallthru */
2925         case ANEG_STATE_AN_ENABLE:
2926                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2927                 if (ap->flags & MR_AN_ENABLE) {
2928                         ap->link_time = 0;
2929                         ap->cur_time = 0;
2930                         ap->ability_match_cfg = 0;
2931                         ap->ability_match_count = 0;
2932                         ap->ability_match = 0;
2933                         ap->idle_match = 0;
2934                         ap->ack_match = 0;
2935
2936                         ap->state = ANEG_STATE_RESTART_INIT;
2937                 } else {
2938                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2939                 }
2940                 break;
2941
2942         case ANEG_STATE_RESTART_INIT:
2943                 ap->link_time = ap->cur_time;
2944                 ap->flags &= ~(MR_NP_LOADED);
2945                 ap->txconfig = 0;
2946                 tw32(MAC_TX_AUTO_NEG, 0);
2947                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2948                 tw32_f(MAC_MODE, tp->mac_mode);
2949                 udelay(40);
2950
2951                 ret = ANEG_TIMER_ENAB;
2952                 ap->state = ANEG_STATE_RESTART;
2953
2954                 /* fallthru */
2955         case ANEG_STATE_RESTART:
2956                 delta = ap->cur_time - ap->link_time;
2957                 if (delta > ANEG_STATE_SETTLE_TIME) {
2958                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2959                 } else {
2960                         ret = ANEG_TIMER_ENAB;
2961                 }
2962                 break;
2963
2964         case ANEG_STATE_DISABLE_LINK_OK:
2965                 ret = ANEG_DONE;
2966                 break;
2967
2968         case ANEG_STATE_ABILITY_DETECT_INIT:
2969                 ap->flags &= ~(MR_TOGGLE_TX);
2970                 ap->txconfig = ANEG_CFG_FD;
2971                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2972                 if (flowctrl & ADVERTISE_1000XPAUSE)
2973                         ap->txconfig |= ANEG_CFG_PS1;
2974                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2975                         ap->txconfig |= ANEG_CFG_PS2;
2976                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2977                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2978                 tw32_f(MAC_MODE, tp->mac_mode);
2979                 udelay(40);
2980
2981                 ap->state = ANEG_STATE_ABILITY_DETECT;
2982                 break;
2983
2984         case ANEG_STATE_ABILITY_DETECT:
2985                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2986                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2987                 }
2988                 break;
2989
2990         case ANEG_STATE_ACK_DETECT_INIT:
2991                 ap->txconfig |= ANEG_CFG_ACK;
2992                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2993                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2994                 tw32_f(MAC_MODE, tp->mac_mode);
2995                 udelay(40);
2996
2997                 ap->state = ANEG_STATE_ACK_DETECT;
2998
2999                 /* fallthru */
3000         case ANEG_STATE_ACK_DETECT:
3001                 if (ap->ack_match != 0) {
3002                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3003                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3004                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3005                         } else {
3006                                 ap->state = ANEG_STATE_AN_ENABLE;
3007                         }
3008                 } else if (ap->ability_match != 0 &&
3009                            ap->rxconfig == 0) {
3010                         ap->state = ANEG_STATE_AN_ENABLE;
3011                 }
3012                 break;
3013
3014         case ANEG_STATE_COMPLETE_ACK_INIT:
3015                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3016                         ret = ANEG_FAILED;
3017                         break;
3018                 }
3019                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3020                                MR_LP_ADV_HALF_DUPLEX |
3021                                MR_LP_ADV_SYM_PAUSE |
3022                                MR_LP_ADV_ASYM_PAUSE |
3023                                MR_LP_ADV_REMOTE_FAULT1 |
3024                                MR_LP_ADV_REMOTE_FAULT2 |
3025                                MR_LP_ADV_NEXT_PAGE |
3026                                MR_TOGGLE_RX |
3027                                MR_NP_RX);
3028                 if (ap->rxconfig & ANEG_CFG_FD)
3029                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3030                 if (ap->rxconfig & ANEG_CFG_HD)
3031                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3032                 if (ap->rxconfig & ANEG_CFG_PS1)
3033                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3034                 if (ap->rxconfig & ANEG_CFG_PS2)
3035                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3036                 if (ap->rxconfig & ANEG_CFG_RF1)
3037                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3038                 if (ap->rxconfig & ANEG_CFG_RF2)
3039                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3040                 if (ap->rxconfig & ANEG_CFG_NP)
3041                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3042
3043                 ap->link_time = ap->cur_time;
3044
3045                 ap->flags ^= (MR_TOGGLE_TX);
3046                 if (ap->rxconfig & 0x0008)
3047                         ap->flags |= MR_TOGGLE_RX;
3048                 if (ap->rxconfig & ANEG_CFG_NP)
3049                         ap->flags |= MR_NP_RX;
3050                 ap->flags |= MR_PAGE_RX;
3051
3052                 ap->state = ANEG_STATE_COMPLETE_ACK;
3053                 ret = ANEG_TIMER_ENAB;
3054                 break;
3055
3056         case ANEG_STATE_COMPLETE_ACK:
3057                 if (ap->ability_match != 0 &&
3058                     ap->rxconfig == 0) {
3059                         ap->state = ANEG_STATE_AN_ENABLE;
3060                         break;
3061                 }
3062                 delta = ap->cur_time - ap->link_time;
3063                 if (delta > ANEG_STATE_SETTLE_TIME) {
3064                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3065                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3066                         } else {
3067                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3068                                     !(ap->flags & MR_NP_RX)) {
3069                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3070                                 } else {
3071                                         ret = ANEG_FAILED;
3072                                 }
3073                         }
3074                 }
3075                 break;
3076
3077         case ANEG_STATE_IDLE_DETECT_INIT:
3078                 ap->link_time = ap->cur_time;
3079                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3080                 tw32_f(MAC_MODE, tp->mac_mode);
3081                 udelay(40);
3082
3083                 ap->state = ANEG_STATE_IDLE_DETECT;
3084                 ret = ANEG_TIMER_ENAB;
3085                 break;
3086
3087         case ANEG_STATE_IDLE_DETECT:
3088                 if (ap->ability_match != 0 &&
3089                     ap->rxconfig == 0) {
3090                         ap->state = ANEG_STATE_AN_ENABLE;
3091                         break;
3092                 }
3093                 delta = ap->cur_time - ap->link_time;
3094                 if (delta > ANEG_STATE_SETTLE_TIME) {
3095                         /* XXX another gem from the Broadcom driver :( */
3096                         ap->state = ANEG_STATE_LINK_OK;
3097                 }
3098                 break;
3099
3100         case ANEG_STATE_LINK_OK:
3101                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3102                 ret = ANEG_DONE;
3103                 break;
3104
3105         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3106                 /* Next-page exchange is not implemented. */
3107                 break;
3108
3109         case ANEG_STATE_NEXT_PAGE_WAIT:
3110                 /* Next-page exchange is not implemented. */
3111                 break;
3112
3113         default:
3114                 ret = ANEG_FAILED;
3115                 break;
3116         }
3117
3118         return ret;
3119 }
3120
3121 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3122 {
3123         int res = 0;
3124         struct tg3_fiber_aneginfo aninfo;
3125         int status = ANEG_FAILED;
3126         unsigned int tick;
3127         u32 tmp;
3128
3129         tw32_f(MAC_TX_AUTO_NEG, 0);
3130
3131         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3132         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3133         udelay(40);
3134
3135         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3136         udelay(40);
3137
3138         memset(&aninfo, 0, sizeof(aninfo));
3139         aninfo.flags |= MR_AN_ENABLE;
3140         aninfo.state = ANEG_STATE_UNKNOWN;
3141         aninfo.cur_time = 0;
3142         tick = 0;
3143         while (++tick < 195000) {
3144                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3145                 if (status == ANEG_DONE || status == ANEG_FAILED)
3146                         break;
3147
3148                 udelay(1);
3149         }
3150
3151         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3152         tw32_f(MAC_MODE, tp->mac_mode);
3153         udelay(40);
3154
3155         *txflags = aninfo.txconfig;
3156         *rxflags = aninfo.flags;
3157
3158         if (status == ANEG_DONE &&
3159             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3160                              MR_LP_ADV_FULL_DUPLEX)))
3161                 res = 1;
3162
3163         return res;
3164 }
3165
3166 static void tg3_init_bcm8002(struct tg3 *tp)
3167 {
3168         u32 mac_status = tr32(MAC_STATUS);
3169         int i;
3170
3171         /* Reset when initializing for the first time, or when we have link. */
3172         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3173             !(mac_status & MAC_STATUS_PCS_SYNCED))
3174                 return;
3175
3176         /* Set PLL lock range. */
3177         tg3_writephy(tp, 0x16, 0x8007);
3178
3179         /* SW reset */
3180         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3181
3182         /* Wait for reset to complete. */
3183         /* XXX schedule_timeout() ... */
3184         for (i = 0; i < 500; i++)
3185                 udelay(10);
3186
3187         /* Config mode; select PMA/Ch 1 regs. */
3188         tg3_writephy(tp, 0x10, 0x8411);
3189
3190         /* Enable auto-lock and comdet, select txclk for tx. */
3191         tg3_writephy(tp, 0x11, 0x0a10);
3192
3193         tg3_writephy(tp, 0x18, 0x00a0);
3194         tg3_writephy(tp, 0x16, 0x41ff);
3195
3196         /* Assert and deassert POR. */
3197         tg3_writephy(tp, 0x13, 0x0400);
3198         udelay(40);
3199         tg3_writephy(tp, 0x13, 0x0000);
3200
3201         tg3_writephy(tp, 0x11, 0x0a50);
3202         udelay(40);
3203         tg3_writephy(tp, 0x11, 0x0a10);
3204
3205         /* Wait for signal to stabilize */
3206         /* XXX schedule_timeout() ... */
3207         for (i = 0; i < 15000; i++)
3208                 udelay(10);
3209
3210         /* Deselect the channel register so we can read the PHY ID
3211          * later.
3212          */
3213         tg3_writephy(tp, 0x10, 0x8011);
3214 }
3215
3216 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3217 {
3218         u16 flowctrl;
3219         u32 sg_dig_ctrl, sg_dig_status;
3220         u32 serdes_cfg, expected_sg_dig_ctrl;
3221         int workaround, port_a;
3222         int current_link_up;
3223
3224         serdes_cfg = 0;
3225         expected_sg_dig_ctrl = 0;
3226         workaround = 0;
3227         port_a = 1;
3228         current_link_up = 0;
3229
3230         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3231             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3232                 workaround = 1;
3233                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3234                         port_a = 0;
3235
3236                 /* Preserve bits 0-11, 13 and 14 for signal pre-emphasis
3237                  * and bits 20-23 for the voltage regulator. */
3238                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3239         }
3240
3241         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3242
3243         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3244                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3245                         if (workaround) {
3246                                 u32 val = serdes_cfg;
3247
3248                                 if (port_a)
3249                                         val |= 0xc010000;
3250                                 else
3251                                         val |= 0x4010000;
3252                                 tw32_f(MAC_SERDES_CFG, val);
3253                         }
3254
3255                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3256                 }
3257                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3258                         tg3_setup_flow_control(tp, 0, 0);
3259                         current_link_up = 1;
3260                 }
3261                 goto out;
3262         }
3263
3264         /* Want auto-negotiation.  */
3265         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3266
3267         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3268         if (flowctrl & ADVERTISE_1000XPAUSE)
3269                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3270         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3271                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3272
3273         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3274                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3275                     tp->serdes_counter &&
3276                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3277                                     MAC_STATUS_RCVD_CFG)) ==
3278                      MAC_STATUS_PCS_SYNCED)) {
3279                         tp->serdes_counter--;
3280                         current_link_up = 1;
3281                         goto out;
3282                 }
3283 restart_autoneg:
3284                 if (workaround)
3285                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3286                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3287                 udelay(5);
3288                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3289
3290                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3291                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3292         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3293                                  MAC_STATUS_SIGNAL_DET)) {
3294                 sg_dig_status = tr32(SG_DIG_STATUS);
3295                 mac_status = tr32(MAC_STATUS);
3296
3297                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3298                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3299                         u32 local_adv = 0, remote_adv = 0;
3300
3301                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3302                                 local_adv |= ADVERTISE_1000XPAUSE;
3303                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3304                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3305
3306                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3307                                 remote_adv |= LPA_1000XPAUSE;
3308                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3309                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3310
3311                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3312                         current_link_up = 1;
3313                         tp->serdes_counter = 0;
3314                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3315                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3316                         if (tp->serdes_counter)
3317                                 tp->serdes_counter--;
3318                         else {
3319                                 if (workaround) {
3320                                         u32 val = serdes_cfg;
3321
3322                                         if (port_a)
3323                                                 val |= 0xc010000;
3324                                         else
3325                                                 val |= 0x4010000;
3326
3327                                         tw32_f(MAC_SERDES_CFG, val);
3328                                 }
3329
3330                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3331                                 udelay(40);
3332
3333                                 /* Link parallel detection: link is up
3334                                  * only if we have PCS_SYNC and are not
3335                                  * receiving config code words. */
3336                                 mac_status = tr32(MAC_STATUS);
3337                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3338                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3339                                         tg3_setup_flow_control(tp, 0, 0);
3340                                         current_link_up = 1;
3341                                         tp->tg3_flags2 |=
3342                                                 TG3_FLG2_PARALLEL_DETECT;
3343                                         tp->serdes_counter =
3344                                                 SERDES_PARALLEL_DET_TIMEOUT;
3345                                 } else
3346                                         goto restart_autoneg;
3347                         }
3348                 }
3349         } else {
3350                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3351                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3352         }
3353
3354 out:
3355         return current_link_up;
3356 }
3357
3358 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3359 {
3360         int current_link_up = 0;
3361
3362         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3363                 goto out;
3364
3365         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3366                 u32 txflags, rxflags;
3367                 int i;
3368
3369                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3370                         u32 local_adv = 0, remote_adv = 0;
3371
3372                         if (txflags & ANEG_CFG_PS1)
3373                                 local_adv |= ADVERTISE_1000XPAUSE;
3374                         if (txflags & ANEG_CFG_PS2)
3375                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3376
3377                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3378                                 remote_adv |= LPA_1000XPAUSE;
3379                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3380                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3381
3382                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3383
3384                         current_link_up = 1;
3385                 }
3386                 for (i = 0; i < 30; i++) {
3387                         udelay(20);
3388                         tw32_f(MAC_STATUS,
3389                                (MAC_STATUS_SYNC_CHANGED |
3390                                 MAC_STATUS_CFG_CHANGED));
3391                         udelay(40);
3392                         if ((tr32(MAC_STATUS) &
3393                              (MAC_STATUS_SYNC_CHANGED |
3394                               MAC_STATUS_CFG_CHANGED)) == 0)
3395                                 break;
3396                 }
3397
3398                 mac_status = tr32(MAC_STATUS);
3399                 if (current_link_up == 0 &&
3400                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3401                     !(mac_status & MAC_STATUS_RCVD_CFG))
3402                         current_link_up = 1;
3403         } else {
3404                 tg3_setup_flow_control(tp, 0, 0);
3405
3406                 /* Forcing 1000FD link up. */
3407                 current_link_up = 1;
3408
3409                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3410                 udelay(40);
3411
3412                 tw32_f(MAC_MODE, tp->mac_mode);
3413                 udelay(40);
3414         }
3415
3416 out:
3417         return current_link_up;
3418 }
3419
3420 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3421 {
3422         u32 orig_pause_cfg;
3423         u16 orig_active_speed;
3424         u8 orig_active_duplex;
3425         u32 mac_status;
3426         int current_link_up;
3427         int i;
3428
3429         orig_pause_cfg = tp->link_config.active_flowctrl;
3430         orig_active_speed = tp->link_config.active_speed;
3431         orig_active_duplex = tp->link_config.active_duplex;
3432
3433         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3434             netif_carrier_ok(tp->dev) &&
3435             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3436                 mac_status = tr32(MAC_STATUS);
3437                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3438                                MAC_STATUS_SIGNAL_DET |
3439                                MAC_STATUS_CFG_CHANGED |
3440                                MAC_STATUS_RCVD_CFG);
3441                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3442                                    MAC_STATUS_SIGNAL_DET)) {
3443                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3444                                             MAC_STATUS_CFG_CHANGED));
3445                         return 0;
3446                 }
3447         }
3448
3449         tw32_f(MAC_TX_AUTO_NEG, 0);
3450
3451         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3452         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3453         tw32_f(MAC_MODE, tp->mac_mode);
3454         udelay(40);
3455
3456         if (tp->phy_id == PHY_ID_BCM8002)
3457                 tg3_init_bcm8002(tp);
3458
3459         /* Enable link change events even when polling the serdes. */
3460         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3461         udelay(40);
3462
3463         current_link_up = 0;
3464         mac_status = tr32(MAC_STATUS);
3465
3466         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3467                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3468         else
3469                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3470
3471         tp->hw_status->status =
3472                 (SD_STATUS_UPDATED |
3473                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3474
3475         for (i = 0; i < 100; i++) {
3476                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3477                                     MAC_STATUS_CFG_CHANGED));
3478                 udelay(5);
3479                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3480                                          MAC_STATUS_CFG_CHANGED |
3481                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3482                         break;
3483         }
3484
3485         mac_status = tr32(MAC_STATUS);
3486         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3487                 current_link_up = 0;
3488                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3489                     tp->serdes_counter == 0) {
3490                         tw32_f(MAC_MODE, (tp->mac_mode |
3491                                           MAC_MODE_SEND_CONFIGS));
3492                         udelay(1);
3493                         tw32_f(MAC_MODE, tp->mac_mode);
3494                 }
3495         }
3496
3497         if (current_link_up == 1) {
3498                 tp->link_config.active_speed = SPEED_1000;
3499                 tp->link_config.active_duplex = DUPLEX_FULL;
3500                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3501                                     LED_CTRL_LNKLED_OVERRIDE |
3502                                     LED_CTRL_1000MBPS_ON));
3503         } else {
3504                 tp->link_config.active_speed = SPEED_INVALID;
3505                 tp->link_config.active_duplex = DUPLEX_INVALID;
3506                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3507                                     LED_CTRL_LNKLED_OVERRIDE |
3508                                     LED_CTRL_TRAFFIC_OVERRIDE));
3509         }
3510
3511         if (current_link_up != netif_carrier_ok(tp->dev)) {
3512                 if (current_link_up)
3513                         netif_carrier_on(tp->dev);
3514                 else
3515                         netif_carrier_off(tp->dev);
3516                 tg3_link_report(tp);
3517         } else {
3518                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3519                 if (orig_pause_cfg != now_pause_cfg ||
3520                     orig_active_speed != tp->link_config.active_speed ||
3521                     orig_active_duplex != tp->link_config.active_duplex)
3522                         tg3_link_report(tp);
3523         }
3524
3525         return 0;
3526 }
3527
3528 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3529 {
3530         int current_link_up, err = 0;
3531         u32 bmsr, bmcr;
3532         u16 current_speed;
3533         u8 current_duplex;
3534         u32 local_adv, remote_adv;
3535
3536         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3537         tw32_f(MAC_MODE, tp->mac_mode);
3538         udelay(40);
3539
3540         tw32(MAC_EVENT, 0);
3541
3542         tw32_f(MAC_STATUS,
3543              (MAC_STATUS_SYNC_CHANGED |
3544               MAC_STATUS_CFG_CHANGED |
3545               MAC_STATUS_MI_COMPLETION |
3546               MAC_STATUS_LNKSTATE_CHANGED));
3547         udelay(40);
3548
3549         if (force_reset)
3550                 tg3_phy_reset(tp);
3551
3552         current_link_up = 0;
3553         current_speed = SPEED_INVALID;
3554         current_duplex = DUPLEX_INVALID;
3555
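        /* BMSR link status is latched low; read twice so the second
         * read returns the current state.
         */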
3556         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3557         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3559                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3560                         bmsr |= BMSR_LSTATUS;
3561                 else
3562                         bmsr &= ~BMSR_LSTATUS;
3563         }
3564
3565         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3566
3567         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3568             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3569                 /* do nothing, just check for link up at the end */
3570         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3571                 u32 adv, new_adv;
3572
3573                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3574                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3575                                   ADVERTISE_1000XPAUSE |
3576                                   ADVERTISE_1000XPSE_ASYM |
3577                                   ADVERTISE_SLCT);
3578
3579                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3580
3581                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3582                         new_adv |= ADVERTISE_1000XHALF;
3583                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3584                         new_adv |= ADVERTISE_1000XFULL;
3585
3586                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3587                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3588                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3589                         tg3_writephy(tp, MII_BMCR, bmcr);
3590
3591                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3592                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3593                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3594
3595                         return err;
3596                 }
3597         } else {
3598                 u32 new_bmcr;
3599
3600                 bmcr &= ~BMCR_SPEED1000;
3601                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3602
3603                 if (tp->link_config.duplex == DUPLEX_FULL)
3604                         new_bmcr |= BMCR_FULLDPLX;
3605
3606                 if (new_bmcr != bmcr) {
3607                         /* BMCR_SPEED1000 is a reserved bit that needs
3608                          * to be set on write.
3609                          */
3610                         new_bmcr |= BMCR_SPEED1000;
3611
3612                         /* Force a linkdown */
3613                         if (netif_carrier_ok(tp->dev)) {
3614                                 u32 adv;
3615
3616                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3617                                 adv &= ~(ADVERTISE_1000XFULL |
3618                                          ADVERTISE_1000XHALF |
3619                                          ADVERTISE_SLCT);
3620                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3621                                 tg3_writephy(tp, MII_BMCR, bmcr |
3622                                                            BMCR_ANRESTART |
3623                                                            BMCR_ANENABLE);
3624                                 udelay(10);
3625                                 netif_carrier_off(tp->dev);
3626                         }
3627                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3628                         bmcr = new_bmcr;
3629                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3630                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3631                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3632                             ASIC_REV_5714) {
3633                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3634                                         bmsr |= BMSR_LSTATUS;
3635                                 else
3636                                         bmsr &= ~BMSR_LSTATUS;
3637                         }
3638                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3639                 }
3640         }
3641
3642         if (bmsr & BMSR_LSTATUS) {
3643                 current_speed = SPEED_1000;
3644                 current_link_up = 1;
3645                 if (bmcr & BMCR_FULLDPLX)
3646                         current_duplex = DUPLEX_FULL;
3647                 else
3648                         current_duplex = DUPLEX_HALF;
3649
3650                 local_adv = 0;
3651                 remote_adv = 0;
3652
3653                 if (bmcr & BMCR_ANENABLE) {
3654                         u32 common;
3655
3656                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3657                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3658                         common = local_adv & remote_adv;
3659                         if (common & (ADVERTISE_1000XHALF |
3660                                       ADVERTISE_1000XFULL)) {
3661                                 if (common & ADVERTISE_1000XFULL)
3662                                         current_duplex = DUPLEX_FULL;
3663                                 else
3664                                         current_duplex = DUPLEX_HALF;
3665                         } else
3667                                 current_link_up = 0;
3668                 }
3669         }
3670
3671         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3672                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3673
3674         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3675         if (tp->link_config.active_duplex == DUPLEX_HALF)
3676                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3677
3678         tw32_f(MAC_MODE, tp->mac_mode);
3679         udelay(40);
3680
3681         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3682
3683         tp->link_config.active_speed = current_speed;
3684         tp->link_config.active_duplex = current_duplex;
3685
3686         if (current_link_up != netif_carrier_ok(tp->dev)) {
3687                 if (current_link_up)
3688                         netif_carrier_on(tp->dev);
3689                 else {
3690                         netif_carrier_off(tp->dev);
3691                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3692                 }
3693                 tg3_link_report(tp);
3694         }
3695         return err;
3696 }
3697
3698 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3699 {
3700         if (tp->serdes_counter) {
3701                 /* Give autoneg time to complete. */
3702                 tp->serdes_counter--;
3703                 return;
3704         }
3705         if (!netif_carrier_ok(tp->dev) &&
3706             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3707                 u32 bmcr;
3708
3709                 tg3_readphy(tp, MII_BMCR, &bmcr);
3710                 if (bmcr & BMCR_ANENABLE) {
3711                         u32 phy1, phy2;
3712
3713                         /* Select shadow register 0x1f */
3714                         tg3_writephy(tp, 0x1c, 0x7c00);
3715                         tg3_readphy(tp, 0x1c, &phy1);
3716
3717                         /* Select expansion interrupt status register */
3718                         tg3_writephy(tp, 0x17, 0x0f01);
3719                         tg3_readphy(tp, 0x15, &phy2);
3720                         tg3_readphy(tp, 0x15, &phy2);
3721
3722                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3723                                 /* We have signal detect and not receiving
3724                                  * config code words, link is up by parallel
3725                                  * detection.
3726                                  */
3727
3728                                 bmcr &= ~BMCR_ANENABLE;
3729                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3730                                 tg3_writephy(tp, MII_BMCR, bmcr);
3731                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3732                         }
3733                 }
3734         } else if (netif_carrier_ok(tp->dev) &&
3736                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3737                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3738                 u32 phy2;
3739
3740                 /* Select expansion interrupt status register */
3741                 tg3_writephy(tp, 0x17, 0x0f01);
3742                 tg3_readphy(tp, 0x15, &phy2);
3743                 if (phy2 & 0x20) {
3744                         u32 bmcr;
3745
3746                         /* Config code words received, turn on autoneg. */
3747                         tg3_readphy(tp, MII_BMCR, &bmcr);
3748                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3749
3750                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3752                 }
3753         }
3754 }
3755
3756 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3757 {
3758         int err;
3759
3760         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3761                 err = tg3_setup_fiber_phy(tp, force_reset);
3762         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3763                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3764         } else {
3765                 err = tg3_setup_copper_phy(tp, force_reset);
3766         }
3767
3768         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3769             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3770                 u32 val, scale;
3771
3772                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3773                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3774                         scale = 65;
3775                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3776                         scale = 6;
3777                 else
3778                         scale = 12;
3779
3780                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3781                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3782                 tw32(GRC_MISC_CFG, val);
3783         }
3784
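        /* Half-duplex gigabit requires the extended slot time used for
         * carrier extension, hence the larger SLOT_TIME value below.
         */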
3785         if (tp->link_config.active_speed == SPEED_1000 &&
3786             tp->link_config.active_duplex == DUPLEX_HALF)
3787                 tw32(MAC_TX_LENGTHS,
3788                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3789                       (6 << TX_LENGTHS_IPG_SHIFT) |
3790                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3791         else
3792                 tw32(MAC_TX_LENGTHS,
3793                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3794                       (6 << TX_LENGTHS_IPG_SHIFT) |
3795                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3796
3797         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3798                 if (netif_carrier_ok(tp->dev)) {
3799                         tw32(HOSTCC_STAT_COAL_TICKS,
3800                              tp->coal.stats_block_coalesce_usecs);
3801                 } else {
3802                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3803                 }
3804         }
3805
3806         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3807                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3808                 if (!netif_carrier_ok(tp->dev))
3809                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3810                               tp->pwrmgmt_thresh;
3811                 else
3812                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3813                 tw32(PCIE_PWR_MGMT_THRESH, val);
3814         }
3815
3816         return err;
3817 }
3818
3819 /* This is called whenever we suspect that the system chipset is re-
3820  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3821  * is bogus tx completions. We try to recover by setting the
3822  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3823  * in the workqueue.
3824  */
3825 static void tg3_tx_recover(struct tg3 *tp)
3826 {
3827         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3828                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3829
3830         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3831                "mapped I/O cycles to the network device, attempting to "
3832                "recover. Please report the problem to the driver maintainer "
3833                "and include system chipset information.\n", tp->dev->name);
3834
3835         spin_lock(&tp->lock);
3836         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3837         spin_unlock(&tp->lock);
3838 }
3839
3840 static inline u32 tg3_tx_avail(struct tg3 *tp)
3841 {
3842         smp_mb();
3843         return (tp->tx_pending -
3844                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3845 }
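
/* Editorial sketch, not driver code: tg3_tx_avail() above relies on
 * TG3_TX_RING_SIZE being a power of two, so the masked difference of
 * the free-running u32 indices yields the in-flight descriptor count
 * even after tx_prod wraps past zero.  The index values below are
 * made up purely for illustration.
 */
static u32 __maybe_unused tg3_ring_dist_example(void)
{
        u32 prod = 0x00000002;  /* producer has wrapped ... */
        u32 cons = 0xfffffffe;  /* ... past the consumer    */

        /* (2 - 0xfffffffe) mod 2^32 == 4, and masking keeps the
         * in-ring distance: four descriptors are in flight.
         */
        return (prod - cons) & (TG3_TX_RING_SIZE - 1);
}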
3846
3847 /* Tigon3 never reports partial packet sends, so we do not
3848  * need the special logic (which SunGEM requires) to handle
3849  * SKBs whose frags have not all been sent yet.
3850  */
3851 static void tg3_tx(struct tg3 *tp)
3852 {
3853         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3854         u32 sw_idx = tp->tx_cons;
3855
3856         while (sw_idx != hw_idx) {
3857                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3858                 struct sk_buff *skb = ri->skb;
3859                 int i, tx_bug = 0;
3860
3861                 if (unlikely(skb == NULL)) {
3862                         tg3_tx_recover(tp);
3863                         return;
3864                 }
3865
3866                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3867
3868                 ri->skb = NULL;
3869
3870                 sw_idx = NEXT_TX(sw_idx);
3871
3872                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3873                         ri = &tp->tx_buffers[sw_idx];
3874                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3875                                 tx_bug = 1;
3876                         sw_idx = NEXT_TX(sw_idx);
3877                 }
3878
3879                 dev_kfree_skb(skb);
3880
3881                 if (unlikely(tx_bug)) {
3882                         tg3_tx_recover(tp);
3883                         return;
3884                 }
3885         }
3886
3887         tp->tx_cons = sw_idx;
3888
3889         /* Need to make the tx_cons update visible to tg3_start_xmit()
3890          * before checking netif_queue_stopped(); without the barrier,
3891          * tg3_start_xmit() could miss the update and leave the queue
3892          * stopped forever.  This smp_mb() pairs with the one at the
3893          * top of tg3_tx_avail(), called from the producer side. */
3894         smp_mb();
3895
3896         if (unlikely(netif_queue_stopped(tp->dev) &&
3897                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3898                 netif_tx_lock(tp->dev);
3899                 if (netif_queue_stopped(tp->dev) &&
3900                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3901                         netif_wake_queue(tp->dev);
3902                 netif_tx_unlock(tp->dev);
3903         }
3904 }
3905
3906 /* Returns size of skb allocated or < 0 on error.
3907  *
3908  * We only need to fill in the address because the other members
3909  * of the RX descriptor are invariant, see tg3_init_rings.
3910  *
3911  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3912  * posting buffers we only dirty the first cache line of the RX
3913  * descriptor (containing the address), whereas for the RX status
3914  * buffers the cpu only reads the last cache line of the RX descriptor
3915  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3916  */
3917 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3918                             int src_idx, u32 dest_idx_unmasked)
3919 {
3920         struct tg3_rx_buffer_desc *desc;
3921         struct ring_info *map, *src_map;
3922         struct sk_buff *skb;
3923         dma_addr_t mapping;
3924         int skb_size, dest_idx;
3925
3926         src_map = NULL;
3927         switch (opaque_key) {
3928         case RXD_OPAQUE_RING_STD:
3929                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3930                 desc = &tp->rx_std[dest_idx];
3931                 map = &tp->rx_std_buffers[dest_idx];
3932                 if (src_idx >= 0)
3933                         src_map = &tp->rx_std_buffers[src_idx];
3934                 skb_size = tp->rx_pkt_buf_sz;
3935                 break;
3936
3937         case RXD_OPAQUE_RING_JUMBO:
3938                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3939                 desc = &tp->rx_jumbo[dest_idx];
3940                 map = &tp->rx_jumbo_buffers[dest_idx];
3941                 if (src_idx >= 0)
3942                         src_map = &tp->rx_jumbo_buffers[src_idx];
3943                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3944                 break;
3945
3946         default:
3947                 return -EINVAL;
3948         }
3949
3950         /* Do not overwrite any of the map or rp information
3951          * until we are sure we can commit to a new buffer.
3952          *
3953          * Callers depend upon this behavior and assume that
3954          * we leave everything unchanged if we fail.
3955          */
3956         skb = netdev_alloc_skb(tp->dev, skb_size);
3957         if (skb == NULL)
3958                 return -ENOMEM;
3959
3960         skb_reserve(skb, tp->rx_offset);
3961
3962         mapping = pci_map_single(tp->pdev, skb->data,
3963                                  skb_size - tp->rx_offset,
3964                                  PCI_DMA_FROMDEVICE);
3965
3966         map->skb = skb;
3967         pci_unmap_addr_set(map, mapping, mapping);
3968
3969         if (src_map != NULL)
3970                 src_map->skb = NULL;
3971
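        /* Split the 64-bit DMA address across the two 32-bit descriptor
         * words: e.g. a mapping of 0x0000001234567890 is stored as
         * addr_hi 0x00000012 and addr_lo 0x34567890.
         */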
3972         desc->addr_hi = ((u64)mapping >> 32);
3973         desc->addr_lo = ((u64)mapping & 0xffffffff);
3974
3975         return skb_size;
3976 }
3977
3978 /* We only need to move over in the address because the other
3979  * members of the RX descriptor are invariant.  See notes above
3980  * tg3_alloc_rx_skb for full details.
3981  */
3982 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3983                            int src_idx, u32 dest_idx_unmasked)
3984 {
3985         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3986         struct ring_info *src_map, *dest_map;
3987         int dest_idx;
3988
3989         switch (opaque_key) {
3990         case RXD_OPAQUE_RING_STD:
3991                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3992                 dest_desc = &tp->rx_std[dest_idx];
3993                 dest_map = &tp->rx_std_buffers[dest_idx];
3994                 src_desc = &tp->rx_std[src_idx];
3995                 src_map = &tp->rx_std_buffers[src_idx];
3996                 break;
3997
3998         case RXD_OPAQUE_RING_JUMBO:
3999                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4000                 dest_desc = &tp->rx_jumbo[dest_idx];
4001                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4002                 src_desc = &tp->rx_jumbo[src_idx];
4003                 src_map = &tp->rx_jumbo_buffers[src_idx];
4004                 break;
4005
4006         default:
4007                 return;
4008         }
4009
4010         dest_map->skb = src_map->skb;
4011         pci_unmap_addr_set(dest_map, mapping,
4012                            pci_unmap_addr(src_map, mapping));
4013         dest_desc->addr_hi = src_desc->addr_hi;
4014         dest_desc->addr_lo = src_desc->addr_lo;
4015
4016         src_map->skb = NULL;
4017 }
4018
4019 #if TG3_VLAN_TAG_USED
4020 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4021 {
4022         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4023 }
4024 #endif
4025
4026 /* The RX ring scheme is composed of multiple rings which post fresh
4027  * buffers to the chip, and one special ring the chip uses to report
4028  * status back to the host.
4029  *
4030  * The special ring reports the status of received packets to the
4031  * host.  The chip does not write into the original descriptor the
4032  * RX buffer was obtained from.  The chip simply takes the original
4033  * descriptor as provided by the host, updates the status and length
4034  * field, then writes this into the next status ring entry.
4035  *
4036  * Each ring the host uses to post buffers to the chip is described
4037  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
4038  * arrives, it is first placed into on-chip RAM.  Once the packet's
4039  * length is known, the chip walks down the TG3_BDINFO entries to
4040  * select the ring: the first TG3_BDINFO whose MAXLEN accommodates
4041  * the packet's length is chosen.
4042  *
4043  * The "separate ring for rx status" scheme may sound queer, but it makes
4044  * sense from a cache coherency perspective.  If only the host writes
4045  * to the buffer post rings, and only the chip writes to the rx status
4046  * rings, then cache lines never move beyond shared-modified state.
4047  * If both the host and chip were to write into the same ring, cache line
4048  * eviction could occur since both entities want it in an exclusive state.
4049  */
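
/* Editorial sketch of the ring-selection rule just described.  The
 * real MAXLEN walk happens inside the chip; the ring limits here are
 * example values only (~1.5k standard ring, 9k jumbo ring).
 */
static int __maybe_unused tg3_bdinfo_select_example(u32 pkt_len)
{
        static const u32 maxlen[] = { 1536, 9018 };     /* std, jumbo */
        int i;

        /* The first TG3_BDINFO whose MAXLEN accommodates the packet
         * is chosen.
         */
        for (i = 0; i < ARRAY_SIZE(maxlen); i++)
                if (pkt_len <= maxlen[i])
                        return i;       /* 0 = std ring, 1 = jumbo */

        return -1;                      /* oversized frame */
}
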
4050 static int tg3_rx(struct tg3 *tp, int budget)
4051 {
4052         u32 work_mask, rx_std_posted = 0;
4053         u32 sw_idx = tp->rx_rcb_ptr;
4054         u16 hw_idx;
4055         int received;
4056
4057         hw_idx = tp->hw_status->idx[0].rx_producer;
4058         /*
4059          * We need to order the read of hw_idx and the read of
4060          * the opaque cookie.
4061          */
4062         rmb();
4063         work_mask = 0;
4064         received = 0;
4065         while (sw_idx != hw_idx && budget > 0) {
4066                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4067                 unsigned int len;
4068                 struct sk_buff *skb;
4069                 dma_addr_t dma_addr;
4070                 u32 opaque_key, desc_idx, *post_ptr;
4071
4072                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4073                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4074                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4075                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4076                                                   mapping);
4077                         skb = tp->rx_std_buffers[desc_idx].skb;
4078                         post_ptr = &tp->rx_std_ptr;
4079                         rx_std_posted++;
4080                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4081                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4082                                                   mapping);
4083                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4084                         post_ptr = &tp->rx_jumbo_ptr;
4085                 } else {
4087                         goto next_pkt_nopost;
4088                 }
4089
4090                 work_mask |= opaque_key;
4091
4092                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4093                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4094                 drop_it:
4095                         tg3_recycle_rx(tp, opaque_key,
4096                                        desc_idx, *post_ptr);
4097                 drop_it_no_recycle:
4098                         /* Other statistics are tracked by the card itself. */
4099                         tp->net_stats.rx_dropped++;
4100                         goto next_pkt;
4101                 }
4102
4103                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4104
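                /* Copy-break: small packets are copied into a fresh skb
                 * so the original, already-mapped buffer can simply be
                 * recycled back to the chip; large packets are passed up
                 * directly and a new buffer is allocated in their place.
                 */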
4105                 if (len > RX_COPY_THRESHOLD
4106                         && tp->rx_offset == 2
4107                         /* rx_offset != 2 iff this is a 5701 card running
4108                          * in PCI-X mode [see tg3_get_invariants()] */
4109                 ) {
4110                         int skb_size;
4111
4112                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4113                                                     desc_idx, *post_ptr);
4114                         if (skb_size < 0)
4115                                 goto drop_it;
4116
4117                         pci_unmap_single(tp->pdev, dma_addr,
4118                                          skb_size - tp->rx_offset,
4119                                          PCI_DMA_FROMDEVICE);
4120
4121                         skb_put(skb, len);
4122                 } else {
4123                         struct sk_buff *copy_skb;
4124
4125                         tg3_recycle_rx(tp, opaque_key,
4126                                        desc_idx, *post_ptr);
4127
4128                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4129                         if (copy_skb == NULL)
4130                                 goto drop_it_no_recycle;
4131
4132                         skb_reserve(copy_skb, 2);
4133                         skb_put(copy_skb, len);
4134                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4135                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4136                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4137
4138                         /* We'll reuse the original ring buffer. */
4139                         skb = copy_skb;
4140                 }
4141
4142                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4143                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4144                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4145                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4146                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4147                 else
4148                         skb->ip_summed = CHECKSUM_NONE;
4149
4150                 skb->protocol = eth_type_trans(skb, tp->dev);
4151 #if TG3_VLAN_TAG_USED
4152                 if (tp->vlgrp != NULL &&
4153                     desc->type_flags & RXD_FLAG_VLAN) {
4154                         tg3_vlan_rx(tp, skb,
4155                                     desc->err_vlan & RXD_VLAN_MASK);
4156                 } else
4157 #endif
4158                         netif_receive_skb(skb);
4159
4160                 tp->dev->last_rx = jiffies;
4161                 received++;
4162                 budget--;
4163
4164 next_pkt:
4165                 (*post_ptr)++;
4166
4167                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4168                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4169
4170                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4171                                      TG3_64BIT_REG_LOW, idx);
4172                         work_mask &= ~RXD_OPAQUE_RING_STD;
4173                         rx_std_posted = 0;
4174                 }
4175 next_pkt_nopost:
4176                 sw_idx++;
4177                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4178
4179                 /* Refresh hw_idx to see if there is new work */
4180                 if (sw_idx == hw_idx) {
4181                         hw_idx = tp->hw_status->idx[0].rx_producer;
4182                         rmb();
4183                 }
4184         }
4185
4186         /* ACK the status ring. */
4187         tp->rx_rcb_ptr = sw_idx;
4188         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4189
4190         /* Refill RX ring(s). */
4191         if (work_mask & RXD_OPAQUE_RING_STD) {
4192                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4193                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4194                              sw_idx);
4195         }
4196         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4197                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4198                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4199                              sw_idx);
4200         }
4201         mmiowb();
4202
4203         return received;
4204 }
4205
4206 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4207 {
4208         struct tg3_hw_status *sblk = tp->hw_status;
4209
4210         /* handle link change and other phy events */
4211         if (!(tp->tg3_flags &
4212               (TG3_FLAG_USE_LINKCHG_REG |
4213                TG3_FLAG_POLL_SERDES))) {
4214                 if (sblk->status & SD_STATUS_LINK_CHG) {
4215                         sblk->status = SD_STATUS_UPDATED |
4216                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4217                         spin_lock(&tp->lock);
4218                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4219                                 tw32_f(MAC_STATUS,
4220                                      (MAC_STATUS_SYNC_CHANGED |
4221                                       MAC_STATUS_CFG_CHANGED |
4222                                       MAC_STATUS_MI_COMPLETION |
4223                                       MAC_STATUS_LNKSTATE_CHANGED));
4224                                 udelay(40);
4225                         } else
4226                                 tg3_setup_phy(tp, 0);
4227                         spin_unlock(&tp->lock);
4228                 }
4229         }
4230
4231         /* run TX completion thread */
4232         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4233                 tg3_tx(tp);
4234                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4235                         return work_done;
4236         }
4237
4238         /* run RX thread, within the bounds set by NAPI.
4239          * All RX "locking" is done by ensuring outside
4240          * code synchronizes with tg3->napi.poll()
4241          */
4242         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4243                 work_done += tg3_rx(tp, budget - work_done);
4244
4245         return work_done;
4246 }
4247
4248 static int tg3_poll(struct napi_struct *napi, int budget)
4249 {
4250         struct tg3 *tp = container_of(napi, struct tg3, napi);
4251         int work_done = 0;
4252         struct tg3_hw_status *sblk = tp->hw_status;
4253
4254         while (1) {
4255                 work_done = tg3_poll_work(tp, work_done, budget);
4256
4257                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4258                         goto tx_recovery;
4259
4260                 if (unlikely(work_done >= budget))
4261                         break;
4262
4263                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4264                         /* tp->last_tag is used in tg3_restart_ints() below
4265                          * to tell the hw how much work has been processed,
4266                          * so we must read it before checking for more work.
4267                          */
4268                         tp->last_tag = sblk->status_tag;
4269                         rmb();
4270                 } else
4271                         sblk->status &= ~SD_STATUS_UPDATED;
4272
4273                 if (likely(!tg3_has_work(tp))) {
4274                         netif_rx_complete(tp->dev, napi);
4275                         tg3_restart_ints(tp);
4276                         break;
4277                 }
4278         }
4279
4280         return work_done;
4281
4282 tx_recovery:
4283         /* work_done is guaranteed to be less than budget. */
4284         netif_rx_complete(tp->dev, napi);
4285         schedule_work(&tp->reset_task);
4286         return work_done;
4287 }
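
/* The NAPI contract implemented above, roughly (a hand-written sketch,
 * not driver code):
 *
 *	work_done = tg3_poll_work(tp, work_done, budget);
 *	if (work_done >= budget)
 *		return work_done;		// stay scheduled, poll again
 *	if (!tg3_has_work(tp)) {
 *		netif_rx_complete(dev, napi);	// leave the poll list
 *		tg3_restart_ints(tp);		// re-enable chip interrupts
 *	}
 *
 * The tagged-status re-check before completion closes the race where
 * new work arrives between the last poll and netif_rx_complete().
 */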
4288
4289 static void tg3_irq_quiesce(struct tg3 *tp)
4290 {
4291         BUG_ON(tp->irq_sync);
4292
4293         tp->irq_sync = 1;
4294         smp_mb();
4295
4296         synchronize_irq(tp->pdev->irq);
4297 }
4298
4299 static inline int tg3_irq_sync(struct tg3 *tp)
4300 {
4301         return tp->irq_sync;
4302 }
4303
4304 /* Fully shut down all tg3 driver activity elsewhere in the system.
4305  * If irq_sync is non-zero, then the IRQ handler must be quiesced
4306  * as well.  Most of the time this is not necessary, except when
4307  * shutting down the device.
4308  */
4309 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4310 {
4311         spin_lock_bh(&tp->lock);
4312         if (irq_sync)
4313                 tg3_irq_quiesce(tp);
4314 }
4315
4316 static inline void tg3_full_unlock(struct tg3 *tp)
4317 {
4318         spin_unlock_bh(&tp->lock);
4319 }
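
/* A typical use of the full lock, assuming a slow path that must also
 * quiesce the interrupt handler (a sketch, not a new code path):
 *
 *	tg3_full_lock(tp, 1);	// spin_lock_bh() + synchronize_irq()
 *	... halt and reprogram the hardware ...
 *	tg3_full_unlock(tp);
 *
 * Fast paths that only race with BH context pass irq_sync == 0.
 */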
4320
4321 /* One-shot MSI handler - the chip automatically disables the interrupt
4322  * after sending the MSI, so the driver doesn't have to do it.
4323  */
4324 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4325 {
4326         struct net_device *dev = dev_id;
4327         struct tg3 *tp = netdev_priv(dev);
4328
4329         prefetch(tp->hw_status);
4330         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4331
4332         if (likely(!tg3_irq_sync(tp)))
4333                 netif_rx_schedule(dev, &tp->napi);
4334
4335         return IRQ_HANDLED;
4336 }
4337
4338 /* MSI ISR - No need to check for interrupt sharing and no need to
4339  * flush status block and interrupt mailbox. PCI ordering rules
4340  * guarantee that MSI will arrive after the status block.
4341  */
4342 static irqreturn_t tg3_msi(int irq, void *dev_id)
4343 {
4344         struct net_device *dev = dev_id;
4345         struct tg3 *tp = netdev_priv(dev);
4346
4347         prefetch(tp->hw_status);
4348         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4349         /*
4350          * Writing any value to intr-mbox-0 clears PCI INTA# and
4351          * chip-internal interrupt pending events.
4352          * Writing non-zero to intr-mbox-0 additionally tells the
4353          * NIC to stop sending us irqs, engaging "in-intr-handler"
4354          * event coalescing.
4355          */
4356         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4357         if (likely(!tg3_irq_sync(tp)))
4358                 netif_rx_schedule(dev, &tp->napi);
4359
4360         return IRQ_RETVAL(1);
4361 }
4362
4363 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4364 {
4365         struct net_device *dev = dev_id;
4366         struct tg3 *tp = netdev_priv(dev);
4367         struct tg3_hw_status *sblk = tp->hw_status;
4368         unsigned int handled = 1;
4369
4370         /* In INTx mode, the interrupt can arrive at the CPU before the
4371          * status block that was posted just prior to it is visible.
4372          * Reading the PCI State register will confirm whether the
4373          * interrupt is ours and will flush the status block.
4374          */
4375         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4376                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4377                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4378                         handled = 0;
4379                         goto out;
4380                 }
4381         }
4382
4383         /*
4384          * Writing any value to intr-mbox-0 clears PCI INTA# and
4385          * chip-internal interrupt pending events.
4386          * Writing non-zero to intr-mbox-0 additionally tells the
4387          * NIC to stop sending us irqs, engaging "in-intr-handler"
4388          * event coalescing.
4389          *
4390          * Flush the mailbox to de-assert the IRQ immediately to prevent
4391          * spurious interrupts.  The flush impacts performance but
4392          * excessive spurious interrupts can be worse in some cases.
4393          */
4394         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4395         if (tg3_irq_sync(tp))
4396                 goto out;
4397         sblk->status &= ~SD_STATUS_UPDATED;
4398         if (likely(tg3_has_work(tp))) {
4399                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4400                 netif_rx_schedule(dev, &tp->napi);
4401         } else {
4402                 /* No work; shared interrupt, perhaps?  Re-enable
4403                  * interrupts and flush that PCI write.
4404                  */
4405                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4406                                0x00000000);
4407         }
4408 out:
4409         return IRQ_RETVAL(handled);
4410 }
4411
4412 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4413 {
4414         struct net_device *dev = dev_id;
4415         struct tg3 *tp = netdev_priv(dev);
4416         struct tg3_hw_status *sblk = tp->hw_status;
4417         unsigned int handled = 1;
4418
4419         /* In INTx mode, the interrupt can arrive at the CPU before the
4420          * status block that was posted just prior to it is visible.
4421          * Reading the PCI State register will confirm whether the
4422          * interrupt is ours and will flush the status block.
4423          */
4424         if (unlikely(sblk->status_tag == tp->last_tag)) {
4425                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4426                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4427                         handled = 0;
4428                         goto out;
4429                 }
4430         }
4431
4432         /*
4433          * Writing any value to intr-mbox-0 clears PCI INTA# and
4434          * chip-internal interrupt pending events.
4435          * Writing non-zero to intr-mbox-0 additionally tells the
4436          * NIC to stop sending us irqs, engaging "in-intr-handler"
4437          * event coalescing.
4438          *
4439          * Flush the mailbox to de-assert the IRQ immediately to prevent
4440          * spurious interrupts.  The flush impacts performance but
4441          * excessive spurious interrupts can be worse in some cases.
4442          */
4443         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4444         if (tg3_irq_sync(tp))
4445                 goto out;
4446         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4447                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4448                 /* Update last_tag to mark that this status has been
4449                  * seen. Because interrupt may be shared, we may be
4450                  * racing with tg3_poll(), so only update last_tag
4451                  * if tg3_poll() is not scheduled.
4452                  */
4453                 tp->last_tag = sblk->status_tag;
4454                 __netif_rx_schedule(dev, &tp->napi);
4455         }
4456 out:
4457         return IRQ_RETVAL(handled);
4458 }
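
/* With tagged status the "is there new work?" test is a tag compare
 * rather than the SD_STATUS_UPDATED bit (illustrative):
 *
 *	if (sblk->status_tag != tp->last_tag)
 *		// the chip has posted a status update we have not seen
 *
 * and tg3_restart_ints() later writes the last seen tag back to the
 * interrupt mailbox, so the chip knows how far the host has processed.
 */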
4459
4460 /* ISR for interrupt test */
4461 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4462 {
4463         struct net_device *dev = dev_id;
4464         struct tg3 *tp = netdev_priv(dev);
4465         struct tg3_hw_status *sblk = tp->hw_status;
4466
4467         if ((sblk->status & SD_STATUS_UPDATED) ||
4468             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4469                 tg3_disable_ints(tp);
4470                 return IRQ_RETVAL(1);
4471         }
4472         return IRQ_RETVAL(0);
4473 }
4474
4475 static int tg3_init_hw(struct tg3 *, int);
4476 static int tg3_halt(struct tg3 *, int, int);
4477
4478 /* Restart hardware after configuration changes, self-test, etc.
4479  * Invoked with tp->lock held.
4480  */
4481 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4482         __releases(tp->lock)
4483         __acquires(tp->lock)
4484 {
4485         int err;
4486
4487         err = tg3_init_hw(tp, reset_phy);
4488         if (err) {
4489                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4490                        "aborting.\n", tp->dev->name);
4491                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4492                 tg3_full_unlock(tp);
4493                 del_timer_sync(&tp->timer);
4494                 tp->irq_sync = 0;
4495                 napi_enable(&tp->napi);
4496                 dev_close(tp->dev);
4497                 tg3_full_lock(tp, 0);
4498         }
4499         return err;
4500 }
4501
4502 #ifdef CONFIG_NET_POLL_CONTROLLER
4503 static void tg3_poll_controller(struct net_device *dev)
4504 {
4505         struct tg3 *tp = netdev_priv(dev);
4506
4507         tg3_interrupt(tp->pdev->irq, dev);
4508 }
4509 #endif
4510
4511 static void tg3_reset_task(struct work_struct *work)
4512 {
4513         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4514         int err;
4515         unsigned int restart_timer;
4516
4517         tg3_full_lock(tp, 0);
4518
4519         if (!netif_running(tp->dev)) {
4520                 tg3_full_unlock(tp);
4521                 return;
4522         }
4523
4524         tg3_full_unlock(tp);
4525
4526         tg3_phy_stop(tp);
4527
4528         tg3_netif_stop(tp);
4529
4530         tg3_full_lock(tp, 1);
4531
4532         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4533         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4534
4535         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4536                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4537                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4538                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4539                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4540         }
4541
4542         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4543         err = tg3_init_hw(tp, 1);
4544         if (err)
4545                 goto out;
4546
4547         tg3_netif_start(tp);
4548
4549         if (restart_timer)
4550                 mod_timer(&tp->timer, jiffies + 1);
4551
4552 out:
4553         tg3_full_unlock(tp);
4554
4555         if (!err)
4556                 tg3_phy_start(tp);
4557 }
4558
4559 static void tg3_dump_short_state(struct tg3 *tp)
4560 {
4561         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4562                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4563         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4564                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4565 }
4566
4567 static void tg3_tx_timeout(struct net_device *dev)
4568 {
4569         struct tg3 *tp = netdev_priv(dev);
4570
4571         if (netif_msg_tx_err(tp)) {
4572                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4573                        dev->name);
4574                 tg3_dump_short_state(tp);
4575         }
4576
4577         schedule_work(&tp->reset_task);
4578 }
4579
4580 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
4581 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4582 {
4583         u32 base = (u32) mapping & 0xffffffff;
4584
4585         return ((base > 0xffffdcc0) &&
4586                 (base + len + 8 < base));
4587 }
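
/* Worked example for the test above (an assumed mapping, not real
 * data): base == 0xffffe000 and len == 0x2000 gives base > 0xffffdcc0,
 * and base + len + 8 wraps to 0x8 in 32 bits, i.e. < base, so a buffer
 * straddling a 4GB line is caught and must be bounced.
 */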
4588
4589 /* Test for DMA addresses > 40-bit */
4590 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4591                                           int len)
4592 {
4593 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4594         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4595                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4596         return 0;
4597 #else
4598         return 0;
4599 #endif
4600 }
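
/* E.g. with TG3_FLAG_40BIT_DMA_BUG set, a (hypothetical) mapping of
 * 0xfffffff000 with len == 0x2000 would give mapping + len ==
 * 0x10000001000, above DMA_40BIT_MASK (2^40 - 1), so the buffer has to
 * be bounced.
 */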
4601
4602 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4603
4604 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4605 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4606                                        u32 last_plus_one, u32 *start,
4607                                        u32 base_flags, u32 mss)
4608 {
4609         struct sk_buff *new_skb;
4610         dma_addr_t new_addr = 0;
4611         u32 entry = *start;
4612         int i, ret = 0;
4613
4614         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4615                 new_skb = skb_copy(skb, GFP_ATOMIC);
4616         else {
4617                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4618
4619                 new_skb = skb_copy_expand(skb,
4620                                           skb_headroom(skb) + more_headroom,
4621                                           skb_tailroom(skb), GFP_ATOMIC);
4622         }
4623
4624         if (!new_skb) {
4625                 ret = -1;
4626         } else {
4627                 /* New SKB is guaranteed to be linear. */
4628                 entry = *start;
4629                 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4630                 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4631
4632                 /* Make sure new skb does not cross any 4G boundaries.
4633                  * Drop the packet if it does.
4634                  */
4635                 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4636                         if (!ret)
4637                                 skb_dma_unmap(&tp->pdev->dev, new_skb,
4638                                               DMA_TO_DEVICE);
4639                         ret = -1;
4640                         dev_kfree_skb(new_skb);
4641                         new_skb = NULL;
4642                 } else {
4643                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4644                                     base_flags, 1 | (mss << 1));
4645                         *start = NEXT_TX(entry);
4646                 }
4647         }
4648
4649         /* Now clean up the sw ring entries. */
4650         i = 0;
4651         while (entry != last_plus_one) {
4652                 if (i == 0) {
4653                         tp->tx_buffers[entry].skb = new_skb;
4654                 } else {
4655                         tp->tx_buffers[entry].skb = NULL;
4656                 }
4657                 entry = NEXT_TX(entry);
4658                 i++;
4659         }
4660
4661         skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4662         dev_kfree_skb(skb);
4663
4664         return ret;
4665 }
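
/* The workaround above is a bounce-buffer scheme: the entire skb is
 * copied into a new linear skb (with extra headroom on the 5701 to fix
 * up alignment), remapped, and substituted for the whole descriptor
 * chain of the original; the old skb is then unmapped and freed.
 */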
4666
4667 static void tg3_set_txd(struct tg3 *tp, int entry,
4668                         dma_addr_t mapping, int len, u32 flags,
4669                         u32 mss_and_is_end)
4670 {
4671         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4672         int is_end = (mss_and_is_end & 0x1);
4673         u32 mss = (mss_and_is_end >> 1);
4674         u32 vlan_tag = 0;
4675
4676         if (is_end)
4677                 flags |= TXD_FLAG_END;
4678         if (flags & TXD_FLAG_VLAN) {
4679                 vlan_tag = flags >> 16;
4680                 flags &= 0xffff;
4681         }
4682         vlan_tag |= (mss << TXD_MSS_SHIFT);
4683
4684         txd->addr_hi = ((u64) mapping >> 32);
4685         txd->addr_lo = ((u64) mapping & 0xffffffff);
4686         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4687         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4688 }
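
/* mss_and_is_end packs two values: bit 0 is the END-of-packet flag and
 * bits 31:1 are the MSS.  E.g. a single-descriptor TSO frame with an
 * MSS of 1448 passes 1 | (1448 << 1) == 0xb51, which unpacks above to
 * is_end == 1 and mss == 1448.
 */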
4689
4690 /* hard_start_xmit for devices that don't have any bugs and
4691  * support TG3_FLG2_HW_TSO_2 only.
4692  */
4693 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4694 {
4695         struct tg3 *tp = netdev_priv(dev);
4696         u32 len, entry, base_flags, mss;
4697         struct skb_shared_info *sp;
4698         dma_addr_t mapping;
4699
4700         len = skb_headlen(skb);
4701
4702         /* We are running in BH disabled context with netif_tx_lock
4703          * and TX reclaim runs via tp->napi.poll inside of a software
4704          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4705          * no IRQ context deadlocks to worry about either.  Rejoice!
4706          */
4707         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4708                 if (!netif_queue_stopped(dev)) {
4709                         netif_stop_queue(dev);
4710
4711                         /* This is a hard error, log it. */
4712                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4713                                "queue awake!\n", dev->name);
4714                 }
4715                 return NETDEV_TX_BUSY;
4716         }
4717
4718         entry = tp->tx_prod;
4719         base_flags = 0;
4720         mss = 0;
4721         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4722                 int tcp_opt_len, ip_tcp_len;
4723
4724                 if (skb_header_cloned(skb) &&
4725                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4726                         dev_kfree_skb(skb);
4727                         goto out_unlock;
4728                 }
4729
4730                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4731                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4732                 else {
4733                         struct iphdr *iph = ip_hdr(skb);
4734
4735                         tcp_opt_len = tcp_optlen(skb);
4736                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4737
4738                         iph->check = 0;
4739                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4740                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4741                 }
4742
4743                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4744                                TXD_FLAG_CPU_POST_DMA);
4745
4746                 tcp_hdr(skb)->check = 0;
4747         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4750                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4751 #if TG3_VLAN_TAG_USED
4752         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4753                 base_flags |= (TXD_FLAG_VLAN |
4754                                (vlan_tx_tag_get(skb) << 16));
4755 #endif
4756
4757         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4758                 dev_kfree_skb(skb);
4759                 goto out_unlock;
4760         }
4761
4762         sp = skb_shinfo(skb);
4763
4764         mapping = sp->dma_maps[0];
4765
4766         tp->tx_buffers[entry].skb = skb;
4767
4768         tg3_set_txd(tp, entry, mapping, len, base_flags,
4769                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4770
4771         entry = NEXT_TX(entry);
4772
4773         /* Now loop through additional data fragments, and queue them. */
4774         if (skb_shinfo(skb)->nr_frags > 0) {
4775                 unsigned int i, last;
4776
4777                 last = skb_shinfo(skb)->nr_frags - 1;
4778                 for (i = 0; i <= last; i++) {
4779                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4780
4781                         len = frag->size;
4782                         mapping = sp->dma_maps[i + 1];
4783                         tp->tx_buffers[entry].skb = NULL;
4784
4785                         tg3_set_txd(tp, entry, mapping, len,
4786                                     base_flags, (i == last) | (mss << 1));
4787
4788                         entry = NEXT_TX(entry);
4789                 }
4790         }
4791
4792         /* Packets are ready, update Tx producer idx locally and on the card. */
4793         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4794
4795         tp->tx_prod = entry;
4796         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4797                 netif_stop_queue(dev);
4798                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4799                         netif_wake_queue(tp->dev);
4800         }
4801
4802 out_unlock:
4803         mmiowb();
4804
4805         dev->trans_start = jiffies;
4806
4807         return NETDEV_TX_OK;
4808 }
4809
4810 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4811
4812 /* Use GSO to work around a rare TSO bug that may be triggered when the
4813  * TSO header is greater than 80 bytes.
4814  */
4815 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4816 {
4817         struct sk_buff *segs, *nskb;
4818
4819         /* Estimate the number of fragments in the worst case */
4820         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4821                 netif_stop_queue(tp->dev);
4822                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4823                         return NETDEV_TX_BUSY;
4824
4825                 netif_wake_queue(tp->dev);
4826         }
4827
4828         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4829         if (IS_ERR(segs))
4830                 goto tg3_tso_bug_end;
4831
4832         do {
4833                 nskb = segs;
4834                 segs = segs->next;
4835                 nskb->next = NULL;
4836                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4837         } while (segs);
4838
4839 tg3_tso_bug_end:
4840         dev_kfree_skb(skb);
4841
4842         return NETDEV_TX_OK;
4843 }
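
/* This fallback is software GSO: skb_gso_segment() splits the
 * offending TSO skb into MTU-sized segments using the device feature
 * mask with NETIF_F_TSO cleared, and each segment is re-queued through
 * tg3_start_xmit_dma_bug(), so the hardware never sees a TSO header
 * longer than it can handle.
 */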
4844
4845 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4846  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4847  */
4848 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4849 {
4850         struct tg3 *tp = netdev_priv(dev);
4851         u32 len, entry, base_flags, mss;
4852         struct skb_shared_info *sp;
4853         int would_hit_hwbug;
4854         dma_addr_t mapping;
4855
4856         len = skb_headlen(skb);
4857
4858         /* We are running in BH disabled context with netif_tx_lock
4859          * and TX reclaim runs via tp->napi.poll inside of a software
4860          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4861          * no IRQ context deadlocks to worry about either.  Rejoice!
4862          */
4863         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4864                 if (!netif_queue_stopped(dev)) {
4865                         netif_stop_queue(dev);
4866
4867                         /* This is a hard error, log it. */
4868                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4869                                "queue awake!\n", dev->name);
4870                 }
4871                 return NETDEV_TX_BUSY;
4872         }
4873
4874         entry = tp->tx_prod;
4875         base_flags = 0;
4876         if (skb->ip_summed == CHECKSUM_PARTIAL)
4877                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4878         mss = 0;
4879         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4880                 struct iphdr *iph;
4881                 int tcp_opt_len, ip_tcp_len, hdr_len;
4882
4883                 if (skb_header_cloned(skb) &&
4884                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4885                         dev_kfree_skb(skb);
4886                         goto out_unlock;
4887                 }
4888
4889                 tcp_opt_len = tcp_optlen(skb);
4890                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4891
4892                 hdr_len = ip_tcp_len + tcp_opt_len;
4893                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4894                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4895                         return (tg3_tso_bug(tp, skb));
4896
4897                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4898                                TXD_FLAG_CPU_POST_DMA);
4899
4900                 iph = ip_hdr(skb);
4901                 iph->check = 0;
4902                 iph->tot_len = htons(mss + hdr_len);
4903                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4904                         tcp_hdr(skb)->check = 0;
4905                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4906                 } else
4907                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4908                                                                  iph->daddr, 0,
4909                                                                  IPPROTO_TCP,
4910                                                                  0);
4911
4912                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4913                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4914                         if (tcp_opt_len || iph->ihl > 5) {
4915                                 int tsflags;
4916
4917                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4918                                 mss |= (tsflags << 11);
4919                         }
4920                 } else {
4921                         if (tcp_opt_len || iph->ihl > 5) {
4922                                 int tsflags;
4923
4924                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4925                                 base_flags |= tsflags << 12;
4926                         }
4927                 }
4928         }
4929 #if TG3_VLAN_TAG_USED
4930         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4931                 base_flags |= (TXD_FLAG_VLAN |
4932                                (vlan_tx_tag_get(skb) << 16));
4933 #endif
4934
4935         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4936                 dev_kfree_skb(skb);
4937                 goto out_unlock;
4938         }
4939
4940         sp = skb_shinfo(skb);
4941
4942         mapping = sp->dma_maps[0];
4943
4944         tp->tx_buffers[entry].skb = skb;
4945
4946         would_hit_hwbug = 0;
4947
4948         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4949                 would_hit_hwbug = 1;
4950         else if (tg3_4g_overflow_test(mapping, len))
4951                 would_hit_hwbug = 1;
4952
4953         tg3_set_txd(tp, entry, mapping, len, base_flags,
4954                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4955
4956         entry = NEXT_TX(entry);
4957
4958         /* Now loop through additional data fragments, and queue them. */
4959         if (skb_shinfo(skb)->nr_frags > 0) {
4960                 unsigned int i, last;
4961
4962                 last = skb_shinfo(skb)->nr_frags - 1;
4963                 for (i = 0; i <= last; i++) {
4964                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4965
4966                         len = frag->size;
4967                         mapping = sp->dma_maps[i + 1];
4968
4969                         tp->tx_buffers[entry].skb = NULL;
4970
4971                         if (tg3_4g_overflow_test(mapping, len))
4972                                 would_hit_hwbug = 1;
4973
4974                         if (tg3_40bit_overflow_test(tp, mapping, len))
4975                                 would_hit_hwbug = 1;
4976
4977                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4978                                 tg3_set_txd(tp, entry, mapping, len,
4979                                             base_flags, (i == last)|(mss << 1));
4980                         else
4981                                 tg3_set_txd(tp, entry, mapping, len,
4982                                             base_flags, (i == last));
4983
4984                         entry = NEXT_TX(entry);
4985                 }
4986         }
4987
4988         if (would_hit_hwbug) {
4989                 u32 last_plus_one = entry;
4990                 u32 start;
4991
4992                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4993                 start &= (TG3_TX_RING_SIZE - 1);
4994
4995                 /* If the workaround fails due to memory/mapping
4996                  * failure, silently drop this packet.
4997                  */
4998                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4999                                                 &start, base_flags, mss))
5000                         goto out_unlock;
5001
5002                 entry = start;
5003         }
5004
5005         /* Packets are ready, update Tx producer idx locally and on the card. */
5006         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5007
5008         tp->tx_prod = entry;
5009         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5010                 netif_stop_queue(dev);
5011                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5012                         netif_wake_queue(tp->dev);
5013         }
5014
5015 out_unlock:
5016         mmiowb();
5017
5018         dev->trans_start = jiffies;
5019
5020         return NETDEV_TX_OK;
5021 }
5022
5023 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5024                                int new_mtu)
5025 {
5026         dev->mtu = new_mtu;
5027
5028         if (new_mtu > ETH_DATA_LEN) {
5029                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5030                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5031                         ethtool_op_set_tso(dev, 0);
5032                 } else
5034                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5035         } else {
5036                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5037                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5038                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5039         }
5040 }
5041
5042 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5043 {
5044         struct tg3 *tp = netdev_priv(dev);
5045         int err;
5046
5047         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5048                 return -EINVAL;
5049
5050         if (!netif_running(dev)) {
5051                 /* We'll just catch it later when the
5052                  * device is brought up.
5053                  */
5054                 tg3_set_mtu(dev, tp, new_mtu);
5055                 return 0;
5056         }
5057
5058         tg3_phy_stop(tp);
5059
5060         tg3_netif_stop(tp);
5061
5062         tg3_full_lock(tp, 1);
5063
5064         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5065
5066         tg3_set_mtu(dev, tp, new_mtu);
5067
5068         err = tg3_restart_hw(tp, 0);
5069
5070         if (!err)
5071                 tg3_netif_start(tp);
5072
5073         tg3_full_unlock(tp);
5074
5075         if (!err)
5076                 tg3_phy_start(tp);
5077
5078         return err;
5079 }
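
/* tg3_change_mtu() shows the driver's canonical "reconfigure while
 * running" shape, which tg3_reset_task() follows as well:
 *
 *	tg3_phy_stop(tp);
 *	tg3_netif_stop(tp);
 *	tg3_full_lock(tp, 1);		// also quiesces the IRQ handler
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... apply the new configuration ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 *
 * with the netif path and PHY restarted on success, in reverse order
 * of the stops.
 */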
5080
5081 /* Free up pending packets in all rx/tx rings.
5082  *
5083  * The chip has been shut down and the driver detached from
5084  * the networking layer, so no interrupts or new tx packets will
5085  * end up in the driver.  tp->{tx,}lock is not held and we are not
5086  * in an interrupt context and thus may sleep.
5087  */
5088 static void tg3_free_rings(struct tg3 *tp)
5089 {
5090         struct ring_info *rxp;
5091         int i;
5092
5093         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5094                 rxp = &tp->rx_std_buffers[i];
5095
5096                 if (rxp->skb == NULL)
5097                         continue;
5098                 pci_unmap_single(tp->pdev,
5099                                  pci_unmap_addr(rxp, mapping),
5100                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5101                                  PCI_DMA_FROMDEVICE);
5102                 dev_kfree_skb_any(rxp->skb);
5103                 rxp->skb = NULL;
5104         }
5105
5106         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5107                 rxp = &tp->rx_jumbo_buffers[i];
5108
5109                 if (rxp->skb == NULL)
5110                         continue;
5111                 pci_unmap_single(tp->pdev,
5112                                  pci_unmap_addr(rxp, mapping),
5113                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5114                                  PCI_DMA_FROMDEVICE);
5115                 dev_kfree_skb_any(rxp->skb);
5116                 rxp->skb = NULL;
5117         }
5118
5119         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5120                 struct tx_ring_info *txp;
5121                 struct sk_buff *skb;
5122
5123                 txp = &tp->tx_buffers[i];
5124                 skb = txp->skb;
5125
5126                 if (skb == NULL) {
5127                         i++;
5128                         continue;
5129                 }
5130
5131                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5132
5133                 txp->skb = NULL;
5134
5135                 i += skb_shinfo(skb)->nr_frags + 1;
5136
5137                 dev_kfree_skb_any(skb);
5138         }
5139 }
5140
5141 /* Initialize tx/rx rings for packet processing.
5142  *
5143  * The chip has been shut down and the driver detached from
5144  * the networking layer, so no interrupts or new tx packets will
5145  * end up in the driver.  tp->{tx,}lock are held and thus
5146  * we may not sleep.
5147  */
5148 static int tg3_init_rings(struct tg3 *tp)
5149 {
5150         u32 i;
5151
5152         /* Free up all the SKBs. */
5153         tg3_free_rings(tp);
5154
5155         /* Zero out all descriptors. */
5156         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5157         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5158         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5159         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5160
5161         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5162         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5163             (tp->dev->mtu > ETH_DATA_LEN))
5164                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5165
5166         /* Initialize the invariants of the rings; we only set this
5167          * stuff once.  This works because the card does not
5168          * write into the rx buffer posting rings.
5169          */
5170         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5171                 struct tg3_rx_buffer_desc *rxd;
5172
5173                 rxd = &tp->rx_std[i];
5174                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5175                         << RXD_LEN_SHIFT;
5176                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5177                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5178                                (i << RXD_OPAQUE_INDEX_SHIFT));
5179         }
5180
5181         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5182                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5183                         struct tg3_rx_buffer_desc *rxd;
5184
5185                         rxd = &tp->rx_jumbo[i];
5186                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5187                                 << RXD_LEN_SHIFT;
5188                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5189                                 RXD_FLAG_JUMBO;
5190                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5191                                (i << RXD_OPAQUE_INDEX_SHIFT));
5192                 }
5193         }
5194
5195         /* Now allocate fresh SKBs for each rx ring. */
5196         for (i = 0; i < tp->rx_pending; i++) {
5197                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5198                         printk(KERN_WARNING PFX
5199                                "%s: Using a smaller RX standard ring, "
5200                                "only %d out of %d buffers were allocated "
5201                                "successfully.\n",
5202                                tp->dev->name, i, tp->rx_pending);
5203                         if (i == 0)
5204                                 return -ENOMEM;
5205                         tp->rx_pending = i;
5206                         break;
5207                 }
5208         }
5209
5210         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5211                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5212                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5213                                              -1, i) < 0) {
5214                                 printk(KERN_WARNING PFX
5215                                        "%s: Using a smaller RX jumbo ring, "
5216                                        "only %d out of %d buffers were "
5217                                        "allocated successfully.\n",
5218                                        tp->dev->name, i, tp->rx_jumbo_pending);
5219                                 if (i == 0) {
5220                                         tg3_free_rings(tp);
5221                                         return -ENOMEM;
5222                                 }
5223                                 tp->rx_jumbo_pending = i;
5224                                 break;
5225                         }
5226                 }
5227         }
5228         return 0;
5229 }
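
/* Note the graceful degradation above: if the full rx_pending (or
 * rx_jumbo_pending) count cannot be allocated, the ring is simply run
 * shorter rather than failing the whole init; only a completely empty
 * ring is treated as -ENOMEM.
 */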
5230
5231 /*
5232  * Must not be invoked with interrupt sources disabled and
5233  * the hardware shut down.
5234  */
5235 static void tg3_free_consistent(struct tg3 *tp)
5236 {
5237         kfree(tp->rx_std_buffers);
5238         tp->rx_std_buffers = NULL;
5239         if (tp->rx_std) {
5240                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5241                                     tp->rx_std, tp->rx_std_mapping);
5242                 tp->rx_std = NULL;
5243         }
5244         if (tp->rx_jumbo) {
5245                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5246                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5247                 tp->rx_jumbo = NULL;
5248         }
5249         if (tp->rx_rcb) {
5250                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5251                                     tp->rx_rcb, tp->rx_rcb_mapping);
5252                 tp->rx_rcb = NULL;
5253         }
5254         if (tp->tx_ring) {
5255                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5256                         tp->tx_ring, tp->tx_desc_mapping);
5257                 tp->tx_ring = NULL;
5258         }
5259         if (tp->hw_status) {
5260                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5261                                     tp->hw_status, tp->status_mapping);
5262                 tp->hw_status = NULL;
5263         }
5264         if (tp->hw_stats) {
5265                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5266                                     tp->hw_stats, tp->stats_mapping);
5267                 tp->hw_stats = NULL;
5268         }
5269 }
5270
5271 /*
5272  * Must not be invoked with interrupt sources disabled and
5273  * the hardware shut down.  Can sleep.
5274  */
5275 static int tg3_alloc_consistent(struct tg3 *tp)
5276 {
5277         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5278                                       (TG3_RX_RING_SIZE +
5279                                        TG3_RX_JUMBO_RING_SIZE)) +
5280                                      (sizeof(struct tx_ring_info) *
5281                                       TG3_TX_RING_SIZE),
5282                                      GFP_KERNEL);
5283         if (!tp->rx_std_buffers)
5284                 return -ENOMEM;
5285
5286         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5287         tp->tx_buffers = (struct tx_ring_info *)
5288                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5289
5290         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5291                                           &tp->rx_std_mapping);
5292         if (!tp->rx_std)
5293                 goto err_out;
5294
5295         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5296                                             &tp->rx_jumbo_mapping);
5297
5298         if (!tp->rx_jumbo)
5299                 goto err_out;
5300
5301         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5302                                           &tp->rx_rcb_mapping);
5303         if (!tp->rx_rcb)
5304                 goto err_out;
5305
5306         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5307                                            &tp->tx_desc_mapping);
5308         if (!tp->tx_ring)
5309                 goto err_out;
5310
5311         tp->hw_status = pci_alloc_consistent(tp->pdev,
5312                                              TG3_HW_STATUS_SIZE,
5313                                              &tp->status_mapping);
5314         if (!tp->hw_status)
5315                 goto err_out;
5316
5317         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5318                                             sizeof(struct tg3_hw_stats),
5319                                             &tp->stats_mapping);
5320         if (!tp->hw_stats)
5321                 goto err_out;
5322
5323         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5324         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5325
5326         return 0;
5327
5328 err_out:
5329         tg3_free_consistent(tp);
5330         return -ENOMEM;
5331 }
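
/* The single kzalloc() above backs three logically separate arrays,
 * carved out in order:
 *
 *	rx_std_buffers:   TG3_RX_RING_SIZE       x struct ring_info
 *	rx_jumbo_buffers: TG3_RX_JUMBO_RING_SIZE x struct ring_info
 *	tx_buffers:       TG3_TX_RING_SIZE       x struct tx_ring_info
 *
 * which is why tg3_free_consistent() frees only rx_std_buffers.
 */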
5332
5333 #define MAX_WAIT_CNT 1000
5334
5335 /* To stop a block, clear the enable bit and poll until it
5336  * clears.  tp->lock is held.
5337  */
5338 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5339 {
5340         unsigned int i;
5341         u32 val;
5342
5343         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5344                 switch (ofs) {
5345                 case RCVLSC_MODE:
5346                 case DMAC_MODE:
5347                 case MBFREE_MODE:
5348                 case BUFMGR_MODE:
5349                 case MEMARB_MODE:
5350                         /* We can't enable/disable these bits of the
5351                          * 5705/5750; just say success.
5352                          */
5353                         return 0;
5354
5355                 default:
5356                         break;
5357                 }
5358         }
5359
5360         val = tr32(ofs);
5361         val &= ~enable_bit;
5362         tw32_f(ofs, val);
5363
5364         for (i = 0; i < MAX_WAIT_CNT; i++) {
5365                 udelay(100);
5366                 val = tr32(ofs);
5367                 if ((val & enable_bit) == 0)
5368                         break;
5369         }
5370
5371         if (i == MAX_WAIT_CNT && !silent) {
5372                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5373                        "ofs=%lx enable_bit=%x\n",
5374                        ofs, enable_bit);
5375                 return -ENODEV;
5376         }
5377
5378         return 0;
5379 }
5380
5381 /* tp->lock is held. */
5382 static int tg3_abort_hw(struct tg3 *tp, int silent)
5383 {
5384         int i, err;
5385
5386         tg3_disable_ints(tp);
5387
5388         tp->rx_mode &= ~RX_MODE_ENABLE;
5389         tw32_f(MAC_RX_MODE, tp->rx_mode);
5390         udelay(10);
5391
5392         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5393         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5394         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5395         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5396         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5397         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5398
5399         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5400         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5401         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5402         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5403         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5404         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5405         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5406
5407         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5408         tw32_f(MAC_MODE, tp->mac_mode);
5409         udelay(40);
5410
5411         tp->tx_mode &= ~TX_MODE_ENABLE;
5412         tw32_f(MAC_TX_MODE, tp->tx_mode);
5413
5414         for (i = 0; i < MAX_WAIT_CNT; i++) {
5415                 udelay(100);
5416                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5417                         break;
5418         }
5419         if (i >= MAX_WAIT_CNT) {
5420                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5421                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5422                        tp->dev->name, tr32(MAC_TX_MODE));
5423                 err |= -ENODEV;
5424         }
5425
5426         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5427         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5428         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5429
5430         tw32(FTQ_RESET, 0xffffffff);
5431         tw32(FTQ_RESET, 0x00000000);
5432
5433         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5434         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5435
5436         if (tp->hw_status)
5437                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5438         if (tp->hw_stats)
5439                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5440
5441         return err;
5442 }
5443
5444 /* tp->lock is held. */
5445 static int tg3_nvram_lock(struct tg3 *tp)
5446 {
5447         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5448                 int i;
5449
5450                 if (tp->nvram_lock_cnt == 0) {
5451                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5452                         for (i = 0; i < 8000; i++) {
5453                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5454                                         break;
5455                                 udelay(20);
5456                         }
5457                         if (i == 8000) {
5458                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5459                                 return -ENODEV;
5460                         }
5461                 }
5462                 tp->nvram_lock_cnt++;
5463         }
5464         return 0;
5465 }
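
/* The NVRAM lock is reference counted; only the 0 -> 1 transition
 * touches the hardware arbiter, so nested lock/unlock pairs are cheap.
 * A balanced use looks roughly like this (a sketch, assuming the
 * caller holds tp->lock):
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		tg3_enable_nvram_access(tp);
 *		... read or program flash ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 */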
5466
5467 /* tp->lock is held. */
5468 static void tg3_nvram_unlock(struct tg3 *tp)
5469 {
5470         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5471                 if (tp->nvram_lock_cnt > 0)
5472                         tp->nvram_lock_cnt--;
5473                 if (tp->nvram_lock_cnt == 0)
5474                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5475         }
5476 }
5477
5478 /* tp->lock is held. */
5479 static void tg3_enable_nvram_access(struct tg3 *tp)
5480 {
5481         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5482             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5483                 u32 nvaccess = tr32(NVRAM_ACCESS);
5484
5485                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5486         }
5487 }
5488
5489 /* tp->lock is held. */
5490 static void tg3_disable_nvram_access(struct tg3 *tp)
5491 {
5492         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5493             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5494                 u32 nvaccess = tr32(NVRAM_ACCESS);
5495
5496                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5497         }
5498 }
5499
5500 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5501 {
5502         int i;
5503         u32 apedata;
5504
5505         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5506         if (apedata != APE_SEG_SIG_MAGIC)
5507                 return;
5508
5509         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5510         if (!(apedata & APE_FW_STATUS_READY))
5511                 return;
5512
5513         /* Wait for up to 1 millisecond for APE to service previous event. */
5514         for (i = 0; i < 10; i++) {
5515                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5516                         return;
5517
5518                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5519
5520                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5521                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5522                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5523
5524                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5525
5526                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5527                         break;
5528
5529                 udelay(100);
5530         }
5531
5532         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5533                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5534 }

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                                APE_HOST_DRIVER_ID_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}
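
/* The INIT arm above also (re)publishes the host segment signature, bumps
 * the init counter, and identifies the driver to the APE before signalling
 * STATE_START; the SHUTDOWN and SUSPEND arms only need the state-change
 * event itself.
 */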

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_SHUTDOWN)
                tg3_ape_driver_state_change(tp, kind);
}
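
/* Together the pre/post helpers implement the ASF "new handshake": the
 * driver writes DRV_STATE_* values into NIC_SRAM_FW_DRV_STATE_MBOX around
 * a reset so management firmware can tell a driver-initiated reset from a
 * crash.  A reset is wrapped roughly as sketched here (illustrative only,
 * compiled out; the real call sites also stop firmware and quiesce the
 * hardware first):
 */
#if 0
        tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
        err = tg3_chip_reset(tp);
        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
#endif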

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}
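
/* The legacy variant writes the same DRV_STATE_* values but is gated on
 * TG3_FLAG_ENABLE_ASF rather than TG3_FLG2_ASF_NEW_HANDSHAKE, covering
 * older ASF firmware that predates the pre/post handshake above.
 */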

static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms (200 * 100 usec) for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait up to 1 second (100000 * 10 usec) for firmware
         * initialization to complete.  The firmware acknowledges by
         * writing the one's complement of MAGIC1 back to the mailbox.
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
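
/* Note the asymmetry above: a mailbox timeout is deliberately not an
 * error (firmware may simply be absent), while only the 5906 VCPU path
 * returns -ENODEV, since that chip's internal CPU is expected to come up.
 */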

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
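
/* Only PCI_COMMAND needs a fresh snapshot here; the other config-space
 * values that a chip reset clobbers (misc_host_ctrl, cacheline size,
 * latency timer) were cached at probe time and are replayed by
 * tg3_restore_pci_state() below.
 */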

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                pcie_set_readrq(tp->pdev, 4096);
        } else {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset the MSI enable bit,
                 * so it needs to be restored.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
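
/* Restore order above matters: indirect register access must come back
 * first (TG3PCI_MISC_HOST_CTRL) so that tr32()/tw32() accesses work
 * again, then PCISTATE and PCI_COMMAND, bus-specific tuning (PCIe read
 * request size vs. PCI cacheline/latency timer), PCI-X relaxed ordering,
 * and finally the MSI enable bit that a 5780-class chip reset clears.
 */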

static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        tg3_mdio_stop(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */