[TG3]: Add 1000T & 1000X flowctl adv helpers
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.86"
#define DRV_MODULE_RELDATE      "November 9, 2007"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
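
/* Illustrative aside (not from the original source): because
 * TG3_TX_RING_SIZE is a power of two, the mask form above behaves
 * exactly like the modulo the comment describes, e.g.
 *
 *      NEXT_TX(510) == 511
 *      NEXT_TX(511) == 0    same result as (511 + 1) % TG3_TX_RING_SIZE
 */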

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
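
/* Usage sketch (illustrative, see the tw32_wait_f() macro below): a
 * caller that toggles clocks or GPIO power asks for a guaranteed
 * post-write delay, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which expands to _tw32_flush(tp, TG3PCI_CLOCK_CTRL, clock_ctrl, 40)
 * and waits at least 40 usec whether the posted or non-posted path
 * was taken.
 */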

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
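
/* Usage sketch (illustrative): these helpers move single 32-bit words
 * through the NIC SRAM memory window, e.g. polling a firmware mailbox
 * as tg3_set_power_state() does further down:
 *
 *      u32 val;
 *      tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 */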

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
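
/* Usage sketch (illustrative only): APE locks bracket accesses to
 * resources shared with the management firmware, e.g.
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... touch the shared resource ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 *
 * Both helpers degrade to no-ops (tg3_ape_lock() returns 0) when
 * TG3_FLG3_ENABLE_APE is not set, so callers need no flag checks.
 */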

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
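
/* Usage sketch (hypothetical, using the generic MII registers from
 * <linux/mii.h>): both helpers return 0 on success and -EBUSY if the
 * MI interface stays busy, so reads are usually chained like
 *
 *      u32 id1, id2;
 *      if (!tg3_readphy(tp, MII_PHYSID1, &id1) &&
 *          !tg3_readphy(tp, MII_PHYSID2, &id2))
 *              ... 32-bit PHY id is ((id1 << 16) | id2) ...
 */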

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
        if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
                return 1;
        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
                if (speed != SPEED_10)
                        return 1;
        } else if (speed == SPEED_10)
                return 1;

        return 0;
}
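
/* Informational summary (not in the original): the helper above
 * returns nonzero when MAC_MODE_LINK_POLARITY should be set, i.e.
 * always for LED mode PHY_2, at any speed other than 10 Mbps on a
 * BCM5411 PHY, and only at 10 Mbps on every other PHY.
 */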
1304
1305 static int tg3_setup_phy(struct tg3 *, int);
1306
1307 #define RESET_KIND_SHUTDOWN     0
1308 #define RESET_KIND_INIT         1
1309 #define RESET_KIND_SUSPEND      2
1310
1311 static void tg3_write_sig_post_reset(struct tg3 *, int);
1312 static int tg3_halt_cpu(struct tg3 *, u32);
1313 static int tg3_nvram_lock(struct tg3 *);
1314 static void tg3_nvram_unlock(struct tg3 *);
1315
1316 static void tg3_power_down_phy(struct tg3 *tp)
1317 {
1318         u32 val;
1319
1320         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1321                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1322                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1323                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1324
1325                         sg_dig_ctrl |=
1326                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1327                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1328                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1329                 }
1330                 return;
1331         }
1332
1333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1334                 tg3_bmcr_reset(tp);
1335                 val = tr32(GRC_MISC_CFG);
1336                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1337                 udelay(40);
1338                 return;
1339         } else {
1340                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1341                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1342                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1343         }
1344
1345         /* The PHY should not be powered down on some chips because
1346          * of bugs.
1347          */
1348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1350             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1351              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1352                 return;
1353
1354         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1355                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1356                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1357                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1358                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1359         }
1360
1361         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1362 }
1363
1364 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1365 {
1366         u32 misc_host_ctrl;
1367         u16 power_control, power_caps;
1368         int pm = tp->pm_cap;
1369
1370         /* Make sure register accesses (indirect or otherwise)
1371          * will function correctly.
1372          */
1373         pci_write_config_dword(tp->pdev,
1374                                TG3PCI_MISC_HOST_CTRL,
1375                                tp->misc_host_ctrl);
1376
1377         pci_read_config_word(tp->pdev,
1378                              pm + PCI_PM_CTRL,
1379                              &power_control);
1380         power_control |= PCI_PM_CTRL_PME_STATUS;
1381         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1382         switch (state) {
1383         case PCI_D0:
1384                 power_control |= 0;
1385                 pci_write_config_word(tp->pdev,
1386                                       pm + PCI_PM_CTRL,
1387                                       power_control);
1388                 udelay(100);    /* Delay after power state change */
1389
1390                 /* Switch out of Vaux if it is a NIC */
1391                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1392                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1393
1394                 return 0;
1395
1396         case PCI_D1:
1397                 power_control |= 1;
1398                 break;
1399
1400         case PCI_D2:
1401                 power_control |= 2;
1402                 break;
1403
1404         case PCI_D3hot:
1405                 power_control |= 3;
1406                 break;
1407
1408         default:
1409                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1410                        "requested.\n",
1411                        tp->dev->name, state);
1412                 return -EINVAL;
1413         }
1414
1415         power_control |= PCI_PM_CTRL_PME_ENABLE;
1416
1417         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1418         tw32(TG3PCI_MISC_HOST_CTRL,
1419              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1420
1421         if (tp->link_config.phy_is_low_power == 0) {
1422                 tp->link_config.phy_is_low_power = 1;
1423                 tp->link_config.orig_speed = tp->link_config.speed;
1424                 tp->link_config.orig_duplex = tp->link_config.duplex;
1425                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1426         }
1427
1428         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1429                 tp->link_config.speed = SPEED_10;
1430                 tp->link_config.duplex = DUPLEX_HALF;
1431                 tp->link_config.autoneg = AUTONEG_ENABLE;
1432                 tg3_setup_phy(tp, 0);
1433         }
1434
1435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1436                 u32 val;
1437
1438                 val = tr32(GRC_VCPU_EXT_CTRL);
1439                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1440         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1441                 int i;
1442                 u32 val;
1443
1444                 for (i = 0; i < 200; i++) {
1445                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1446                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1447                                 break;
1448                         msleep(1);
1449                 }
1450         }
1451         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1452                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1453                                                      WOL_DRV_STATE_SHUTDOWN |
1454                                                      WOL_DRV_WOL |
1455                                                      WOL_SET_MAGIC_PKT);
1456
1457         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1458
1459         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1460                 u32 mac_mode;
1461
1462                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1463                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1464                         udelay(40);
1465
1466                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1467                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1468                         else
1469                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1470
1471                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1472                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1473                             ASIC_REV_5700) {
1474                                 u32 speed = (tp->tg3_flags &
1475                                              TG3_FLAG_WOL_SPEED_100MB) ?
1476                                              SPEED_100 : SPEED_10;
1477                                 if (tg3_5700_link_polarity(tp, speed))
1478                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1479                                 else
1480                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1481                         }
1482                 } else {
1483                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1484                 }
1485
1486                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1487                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1488
1489                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1490                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1491                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1492
1493                 tw32_f(MAC_MODE, mac_mode);
1494                 udelay(100);
1495
1496                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1497                 udelay(10);
1498         }
1499
1500         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1501             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1502              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1503                 u32 base_val;
1504
1505                 base_val = tp->pci_clock_ctrl;
1506                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1507                              CLOCK_CTRL_TXCLK_DISABLE);
1508
1509                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1510                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1511         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1512                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1513                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1514                 /* do nothing */
1515         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1516                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1517                 u32 newbits1, newbits2;
1518
1519                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1520                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1521                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1522                                     CLOCK_CTRL_TXCLK_DISABLE |
1523                                     CLOCK_CTRL_ALTCLK);
1524                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1525                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1526                         newbits1 = CLOCK_CTRL_625_CORE;
1527                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1528                 } else {
1529                         newbits1 = CLOCK_CTRL_ALTCLK;
1530                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1531                 }
1532
1533                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1534                             40);
1535
1536                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1537                             40);
1538
1539                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1540                         u32 newbits3;
1541
1542                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1543                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1544                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1545                                             CLOCK_CTRL_TXCLK_DISABLE |
1546                                             CLOCK_CTRL_44MHZ_CORE);
1547                         } else {
1548                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1549                         }
1550
1551                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1552                                     tp->pci_clock_ctrl | newbits3, 40);
1553                 }
1554         }
1555
1556         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1557             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1558             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1559                 tg3_power_down_phy(tp);
1560
1561         tg3_frob_aux_power(tp);
1562
1563         /* Workaround for unstable PLL clock */
1564         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1565             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1566                 u32 val = tr32(0x7d00);
1567
1568                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1569                 tw32(0x7d00, val);
1570                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1571                         int err;
1572
1573                         err = tg3_nvram_lock(tp);
1574                         tg3_halt_cpu(tp, RX_CPU_BASE);
1575                         if (!err)
1576                                 tg3_nvram_unlock(tp);
1577                 }
1578         }
1579
1580         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1581
1582         /* Finally, set the new power state. */
1583         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1584         udelay(100);    /* Delay after power state change */
1585
1586         return 0;
1587 }
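/* Illustrative note (not part of the original file): the raw values
 * 0..3 OR'd into power_control above are the PowerState field
 * (bits 1:0) of the standard PCI_PM_CTRL register, i.e. D0..D3hot.
 * A minimal sketch of a suspend path driving this routine; the caller
 * shown here is an assumption for illustration, not code from this
 * file:
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *		struct tg3 *tp = netdev_priv(dev);
 *
 *		// Pick the deepest D-state the platform allows and let
 *		// the driver program WoL and clocks accordingly.
 *		return tg3_set_power_state(tp, pci_choose_state(pdev, state));
 *	}
 */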
1588
1589 static void tg3_link_report(struct tg3 *tp)
1590 {
1591         if (!netif_carrier_ok(tp->dev)) {
1592                 if (netif_msg_link(tp))
1593                         printk(KERN_INFO PFX "%s: Link is down.\n",
1594                                tp->dev->name);
1595         } else if (netif_msg_link(tp)) {
1596                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597                        tp->dev->name,
1598                        (tp->link_config.active_speed == SPEED_1000 ?
1599                         1000 :
1600                         (tp->link_config.active_speed == SPEED_100 ?
1601                          100 : 10)),
1602                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1603                         "full" : "half"));
1604
1605                 printk(KERN_INFO PFX
1606                        "%s: Flow control is %s for TX and %s for RX.\n",
1607                        tp->dev->name,
1608                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609                        "on" : "off",
1610                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611                        "on" : "off");
1612         }
1613 }
1614
1615 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1616 {
1617         u16 miireg;
1618
1619         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1620                 miireg = ADVERTISE_PAUSE_CAP;
1621         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1622                 miireg = ADVERTISE_PAUSE_ASYM;
1623         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1624                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1625         else
1626                 miireg = 0;
1627
1628         return miireg;
1629 }
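/* Illustrative note (not part of the original file): this helper encodes
 * the standard IEEE 802.3 Annex 28B pause advertisement bits for the
 * copper MII_ADVERTISE register:
 *
 *	flow_ctrl	PAUSE_CAP	PAUSE_ASYM
 *	---------	---------	----------
 *	TX | RX		1		0
 *	TX only		0		1
 *	RX only		1		1
 *	none		0		0
 *
 * For example, tg3_advert_flowctrl_1000T(TG3_FLOW_CTRL_RX) returns
 * ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM.
 */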
1630
1631 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1632 {
1633         u16 miireg;
1634
1635         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1636                 miireg = ADVERTISE_1000XPAUSE;
1637         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1638                 miireg = ADVERTISE_1000XPSE_ASYM;
1639         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1640                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1641         else
1642                 miireg = 0;
1643
1644         return miireg;
1645 }
1646
1647 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1648 {
1649         u8 cap = 0;
1650
1651         if (lcladv & ADVERTISE_PAUSE_CAP) {
1652                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1653                         if (rmtadv & LPA_PAUSE_CAP)
1654                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1655                         else if (rmtadv & LPA_PAUSE_ASYM)
1656                                 cap = TG3_FLOW_CTRL_RX;
1657                 } else {
1658                         if (rmtadv & LPA_PAUSE_CAP)
1659                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1660                 }
1661         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1662                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1663                         cap = TG3_FLOW_CTRL_TX;
1664         }
1665
1666         return cap;
1667 }
1668
1669 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1670 {
1671         u8 cap = 0;
1672
1673         if (lcladv & ADVERTISE_1000XPAUSE) {
1674                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1675                         if (rmtadv & LPA_1000XPAUSE)
1676                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1677                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1678                                 cap = TG3_FLOW_CTRL_RX;
1679                 } else {
1680                         if (rmtadv & LPA_1000XPAUSE)
1681                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1682                 }
1683         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1684                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1685                         cap = TG3_FLOW_CTRL_TX;
1686         }
1687
1688         return cap;
1689 }
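/* Illustrative note (not part of the original file): both resolve
 * helpers apply the IEEE 802.3 Annex 28B pause resolution, differing
 * only in bit names (MII advertisement word vs. 1000BASE-X config
 * word).  A worked example against the 1000T variant:
 *
 *	u16 lcladv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 *	u16 rmtadv = LPA_PAUSE_ASYM;
 *	u8 cap = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
 *
 * The partner can send pause frames but will not honor ours, so cap
 * resolves to TG3_FLOW_CTRL_RX: we act on its pause frames and send
 * none of our own.
 */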
1690
1691 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1692 {
1693         u8 new_tg3_flags = 0;
1694         u32 old_rx_mode = tp->rx_mode;
1695         u32 old_tx_mode = tp->tx_mode;
1696
1697         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1698                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1699                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1700                                                                    remote_adv);
1701                 else
1702                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1703                                                                    remote_adv);
1704         } else {
1705                 new_tg3_flags = tp->link_config.flowctrl;
1706         }
1707
1708         tp->link_config.active_flowctrl = new_tg3_flags;
1709
1710         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1711                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1712         else
1713                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1714
1715         if (old_rx_mode != tp->rx_mode) {
1716                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1717         }
1718
1719         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1720                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1721         else
1722                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1723
1724         if (old_tx_mode != tp->tx_mode) {
1725                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1726         }
1727 }
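/* Illustrative note (not part of the original file): callers hand this
 * routine the raw advertisement words from both ends of the link, as
 * the copper path later in this file does, roughly:
 *
 *	u32 local_adv, remote_adv;
 *
 *	if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
 *		local_adv = 0;
 *	if (tg3_readphy(tp, MII_LPA, &remote_adv))
 *		remote_adv = 0;
 *
 *	tg3_setup_flow_control(tp, local_adv, remote_adv);
 *
 * The resolved capability is then pushed into MAC_RX_MODE/MAC_TX_MODE
 * through the flush-write tw32_f() so the MAC applies it immediately.
 */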
1728
1729 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1730 {
1731         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1732         case MII_TG3_AUX_STAT_10HALF:
1733                 *speed = SPEED_10;
1734                 *duplex = DUPLEX_HALF;
1735                 break;
1736
1737         case MII_TG3_AUX_STAT_10FULL:
1738                 *speed = SPEED_10;
1739                 *duplex = DUPLEX_FULL;
1740                 break;
1741
1742         case MII_TG3_AUX_STAT_100HALF:
1743                 *speed = SPEED_100;
1744                 *duplex = DUPLEX_HALF;
1745                 break;
1746
1747         case MII_TG3_AUX_STAT_100FULL:
1748                 *speed = SPEED_100;
1749                 *duplex = DUPLEX_FULL;
1750                 break;
1751
1752         case MII_TG3_AUX_STAT_1000HALF:
1753                 *speed = SPEED_1000;
1754                 *duplex = DUPLEX_HALF;
1755                 break;
1756
1757         case MII_TG3_AUX_STAT_1000FULL:
1758                 *speed = SPEED_1000;
1759                 *duplex = DUPLEX_FULL;
1760                 break;
1761
1762         default:
1763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1764                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1765                                  SPEED_10;
1766                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1767                                   DUPLEX_HALF;
1768                         break;
1769                 }
1770                 *speed = SPEED_INVALID;
1771                 *duplex = DUPLEX_INVALID;
1772                 break;
1773         }
1774 }
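/* Illustrative note (not part of the original file): AUX_STAT packs the
 * negotiated speed/duplex into the 3-bit SPDMASK field, so a value of
 * MII_TG3_AUX_STAT_1000FULL in that field decodes to SPEED_1000 and
 * DUPLEX_FULL.  The 5906 fallback in the default case reads the
 * discrete _100 and _FULL bits instead, since that part is a
 * 10/100-only device.
 */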
1775
1776 static void tg3_phy_copper_begin(struct tg3 *tp)
1777 {
1778         u32 new_adv;
1779         int i;
1780
1781         if (tp->link_config.phy_is_low_power) {
1782                 /* Entering low power mode.  Disable gigabit and
1783                  * 100baseT advertisements.
1784                  */
1785                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1786
1787                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1788                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1789                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1790                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1791
1792                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1793         } else if (tp->link_config.speed == SPEED_INVALID) {
1794                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1795                         tp->link_config.advertising &=
1796                                 ~(ADVERTISED_1000baseT_Half |
1797                                   ADVERTISED_1000baseT_Full);
1798
1799                 new_adv = ADVERTISE_CSMA;
1800                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1801                         new_adv |= ADVERTISE_10HALF;
1802                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1803                         new_adv |= ADVERTISE_10FULL;
1804                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1805                         new_adv |= ADVERTISE_100HALF;
1806                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1807                         new_adv |= ADVERTISE_100FULL;
1808
1809                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1810
1811                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1812
1813                 if (tp->link_config.advertising &
1814                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1815                         new_adv = 0;
1816                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1817                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1818                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1819                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1820                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1821                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1822                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1823                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1824                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1825                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1826                 } else {
1827                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1828                 }
1829         } else {
1830                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1831                 new_adv |= ADVERTISE_CSMA;
1832
1833                 /* Asking for a specific link mode. */
1834                 if (tp->link_config.speed == SPEED_1000) {
1835                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1836
1837                         if (tp->link_config.duplex == DUPLEX_FULL)
1838                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1839                         else
1840                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1841                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1842                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1843                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1844                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1845                 } else {
1846                         if (tp->link_config.speed == SPEED_100) {
1847                                 if (tp->link_config.duplex == DUPLEX_FULL)
1848                                         new_adv |= ADVERTISE_100FULL;
1849                                 else
1850                                         new_adv |= ADVERTISE_100HALF;
1851                         } else {
1852                                 if (tp->link_config.duplex == DUPLEX_FULL)
1853                                         new_adv |= ADVERTISE_10FULL;
1854                                 else
1855                                         new_adv |= ADVERTISE_10HALF;
1856                         }
1857                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1858
1859                         new_adv = 0;
1860                 }
1861
1862                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1863         }
1864
1865         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1866             tp->link_config.speed != SPEED_INVALID) {
1867                 u32 bmcr, orig_bmcr;
1868
1869                 tp->link_config.active_speed = tp->link_config.speed;
1870                 tp->link_config.active_duplex = tp->link_config.duplex;
1871
1872                 bmcr = 0;
1873                 switch (tp->link_config.speed) {
1874                 default:
1875                 case SPEED_10:
1876                         break;
1877
1878                 case SPEED_100:
1879                         bmcr |= BMCR_SPEED100;
1880                         break;
1881
1882                 case SPEED_1000:
1883                         bmcr |= TG3_BMCR_SPEED1000;
1884                         break;
1885                 }
1886
1887                 if (tp->link_config.duplex == DUPLEX_FULL)
1888                         bmcr |= BMCR_FULLDPLX;
1889
1890                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1891                     (bmcr != orig_bmcr)) {
1892                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1893                         for (i = 0; i < 1500; i++) {
1894                                 u32 tmp;
1895
1896                                 udelay(10);
1897                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1898                                     tg3_readphy(tp, MII_BMSR, &tmp))
1899                                         continue;
1900                                 if (!(tmp & BMSR_LSTATUS)) {
1901                                         udelay(40);
1902                                         break;
1903                                 }
1904                         }
1905                         tg3_writephy(tp, MII_BMCR, bmcr);
1906                         udelay(40);
1907                 }
1908         } else {
1909                 tg3_writephy(tp, MII_BMCR,
1910                              BMCR_ANENABLE | BMCR_ANRESTART);
1911         }
1912 }
1913
1914 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1915 {
1916         int err;
1917
1918         /* Turn off tap power management and set the
1919          * extended packet length bit. */
1920         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1921
1922         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1923         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1924
1925         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1926         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1927
1928         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1929         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1930
1931         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1932         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1933
1934         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1935         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1936
1937         udelay(40);
1938
1939         return err;
1940 }
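/* Illustrative note (not part of the original file): every DSP access
 * above is the same two-step pattern: latch an address into
 * MII_TG3_DSP_ADDRESS, then move data through MII_TG3_DSP_RW_PORT.
 * A hypothetical helper (not defined in this version of the driver)
 * that factors out the pair of writes:
 *
 *	static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 *	{
 *		int err;
 *
 *		err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 *		err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 *		return err;
 *	}
 *
 * with which the body would collapse to calls like
 * tg3_phydsp_write(tp, 0x0012, 0x1804).
 */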
1941
1942 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1943 {
1944         u32 adv_reg, all_mask = 0;
1945
1946         if (mask & ADVERTISED_10baseT_Half)
1947                 all_mask |= ADVERTISE_10HALF;
1948         if (mask & ADVERTISED_10baseT_Full)
1949                 all_mask |= ADVERTISE_10FULL;
1950         if (mask & ADVERTISED_100baseT_Half)
1951                 all_mask |= ADVERTISE_100HALF;
1952         if (mask & ADVERTISED_100baseT_Full)
1953                 all_mask |= ADVERTISE_100FULL;
1954
1955         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1956                 return 0;
1957
1958         if ((adv_reg & all_mask) != all_mask)
1959                 return 0;
1960         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1961                 u32 tg3_ctrl;
1962
1963                 all_mask = 0;
1964                 if (mask & ADVERTISED_1000baseT_Half)
1965                         all_mask |= ADVERTISE_1000HALF;
1966                 if (mask & ADVERTISED_1000baseT_Full)
1967                         all_mask |= ADVERTISE_1000FULL;
1968
1969                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1970                         return 0;
1971
1972                 if ((tg3_ctrl & all_mask) != all_mask)
1973                         return 0;
1974         }
1975         return 1;
1976 }
1977
1978 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1979 {
1980         int current_link_up;
1981         u32 bmsr, dummy;
1982         u16 current_speed;
1983         u8 current_duplex;
1984         int i, err;
1985
1986         tw32(MAC_EVENT, 0);
1987
1988         tw32_f(MAC_STATUS,
1989              (MAC_STATUS_SYNC_CHANGED |
1990               MAC_STATUS_CFG_CHANGED |
1991               MAC_STATUS_MI_COMPLETION |
1992               MAC_STATUS_LNKSTATE_CHANGED));
1993         udelay(40);
1994
1995         tp->mi_mode = MAC_MI_MODE_BASE;
1996         tw32_f(MAC_MI_MODE, tp->mi_mode);
1997         udelay(80);
1998
1999         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2000
2001         /* Some third-party PHYs need to be reset on link going
2002          * down.
2003          */
2004         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2005              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2006              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2007             netif_carrier_ok(tp->dev)) {
2008                 tg3_readphy(tp, MII_BMSR, &bmsr);
2009                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2010                     !(bmsr & BMSR_LSTATUS))
2011                         force_reset = 1;
2012         }
2013         if (force_reset)
2014                 tg3_phy_reset(tp);
2015
2016         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2017                 tg3_readphy(tp, MII_BMSR, &bmsr);
2018                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2019                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2020                         bmsr = 0;
2021
2022                 if (!(bmsr & BMSR_LSTATUS)) {
2023                         err = tg3_init_5401phy_dsp(tp);
2024                         if (err)
2025                                 return err;
2026
2027                         tg3_readphy(tp, MII_BMSR, &bmsr);
2028                         for (i = 0; i < 1000; i++) {
2029                                 udelay(10);
2030                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2031                                     (bmsr & BMSR_LSTATUS)) {
2032                                         udelay(40);
2033                                         break;
2034                                 }
2035                         }
2036
2037                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2038                             !(bmsr & BMSR_LSTATUS) &&
2039                             tp->link_config.active_speed == SPEED_1000) {
2040                                 err = tg3_phy_reset(tp);
2041                                 if (!err)
2042                                         err = tg3_init_5401phy_dsp(tp);
2043                                 if (err)
2044                                         return err;
2045                         }
2046                 }
2047         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2048                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2049                 /* 5701 {A0,B0} CRC bug workaround */
2050                 tg3_writephy(tp, 0x15, 0x0a75);
2051                 tg3_writephy(tp, 0x1c, 0x8c68);
2052                 tg3_writephy(tp, 0x1c, 0x8d68);
2053                 tg3_writephy(tp, 0x1c, 0x8c68);
2054         }
2055
2056         /* Clear pending interrupts... */
2057         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2058         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2059
2060         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2061                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2062         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2063                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2064
2065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2066             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2067                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2068                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2069                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2070                 else
2071                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2072         }
2073
2074         current_link_up = 0;
2075         current_speed = SPEED_INVALID;
2076         current_duplex = DUPLEX_INVALID;
2077
2078         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2079                 u32 val;
2080
2081                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2082                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2083                 if (!(val & (1 << 10))) {
2084                         val |= (1 << 10);
2085                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2086                         goto relink;
2087                 }
2088         }
2089
2090         bmsr = 0;
2091         for (i = 0; i < 100; i++) {
2092                 tg3_readphy(tp, MII_BMSR, &bmsr);
2093                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2094                     (bmsr & BMSR_LSTATUS))
2095                         break;
2096                 udelay(40);
2097         }
2098
2099         if (bmsr & BMSR_LSTATUS) {
2100                 u32 aux_stat, bmcr;
2101
2102                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2103                 for (i = 0; i < 2000; i++) {
2104                         udelay(10);
2105                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2106                             aux_stat)
2107                                 break;
2108                 }
2109
2110                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2111                                              &current_speed,
2112                                              &current_duplex);
2113
2114                 bmcr = 0;
2115                 for (i = 0; i < 200; i++) {
2116                         tg3_readphy(tp, MII_BMCR, &bmcr);
2117                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2118                                 continue;
2119                         if (bmcr && bmcr != 0x7fff)
2120                                 break;
2121                         udelay(10);
2122                 }
2123
2124                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2125                         if (bmcr & BMCR_ANENABLE) {
2126                                 current_link_up = 1;
2127
2128                                 /* Force autoneg restart if we are exiting
2129                                  * low power mode.
2130                                  */
2131                                 if (!tg3_copper_is_advertising_all(tp,
2132                                                 tp->link_config.advertising))
2133                                         current_link_up = 0;
2134                         } else {
2135                                 current_link_up = 0;
2136                         }
2137                 } else {
2138                         if (!(bmcr & BMCR_ANENABLE) &&
2139                             tp->link_config.speed == current_speed &&
2140                             tp->link_config.duplex == current_duplex) {
2141                                 current_link_up = 1;
2142                         } else {
2143                                 current_link_up = 0;
2144                         }
2145                 }
2146
2147                 tp->link_config.active_speed = current_speed;
2148                 tp->link_config.active_duplex = current_duplex;
2149         }
2150
2151         if (current_link_up == 1 &&
2152             (tp->link_config.active_duplex == DUPLEX_FULL) &&
2153             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2154                 u32 local_adv, remote_adv;
2155
2156                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2157                         local_adv = 0;
2158
2159                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2160                         remote_adv = 0;
2161
2162                 /* If we are not advertising what has been requested,
2163                  * bring the link down and reconfigure.
2164                  */
2165                 if (local_adv !=
2166                     tg3_advert_flowctrl_1000T(tp->link_config.flowctrl)) {
2167                         current_link_up = 0;
2168                 } else {
2169                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2170                 }
2171         }
2172 relink:
2173         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2174                 u32 tmp;
2175
2176                 tg3_phy_copper_begin(tp);
2177
2178                 tg3_readphy(tp, MII_BMSR, &tmp);
2179                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2180                     (tmp & BMSR_LSTATUS))
2181                         current_link_up = 1;
2182         }
2183
2184         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2185         if (current_link_up == 1) {
2186                 if (tp->link_config.active_speed == SPEED_100 ||
2187                     tp->link_config.active_speed == SPEED_10)
2188                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2189                 else
2190                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2191         } else
2192                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2193
2194         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2195         if (tp->link_config.active_duplex == DUPLEX_HALF)
2196                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2197
2198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2199                 if (current_link_up == 1 &&
2200                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2201                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2202                 else
2203                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2204         }
2205
2206         /* ??? Without this setting Netgear GA302T PHY does not
2207          * ??? send/receive packets...
2208          */
2209         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2210             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2211                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2212                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2213                 udelay(80);
2214         }
2215
2216         tw32_f(MAC_MODE, tp->mac_mode);
2217         udelay(40);
2218
2219         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2220                 /* Polled via timer. */
2221                 tw32_f(MAC_EVENT, 0);
2222         } else {
2223                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2224         }
2225         udelay(40);
2226
2227         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2228             current_link_up == 1 &&
2229             tp->link_config.active_speed == SPEED_1000 &&
2230             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2231              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2232                 udelay(120);
2233                 tw32_f(MAC_STATUS,
2234                      (MAC_STATUS_SYNC_CHANGED |
2235                       MAC_STATUS_CFG_CHANGED));
2236                 udelay(40);
2237                 tg3_write_mem(tp,
2238                               NIC_SRAM_FIRMWARE_MBOX,
2239                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2240         }
2241
2242         if (current_link_up != netif_carrier_ok(tp->dev)) {
2243                 if (current_link_up)
2244                         netif_carrier_on(tp->dev);
2245                 else
2246                         netif_carrier_off(tp->dev);
2247                 tg3_link_report(tp);
2248         }
2249
2250         return 0;
2251 }
2252
2253 struct tg3_fiber_aneginfo {
2254         int state;
2255 #define ANEG_STATE_UNKNOWN              0
2256 #define ANEG_STATE_AN_ENABLE            1
2257 #define ANEG_STATE_RESTART_INIT         2
2258 #define ANEG_STATE_RESTART              3
2259 #define ANEG_STATE_DISABLE_LINK_OK      4
2260 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2261 #define ANEG_STATE_ABILITY_DETECT       6
2262 #define ANEG_STATE_ACK_DETECT_INIT      7
2263 #define ANEG_STATE_ACK_DETECT           8
2264 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2265 #define ANEG_STATE_COMPLETE_ACK         10
2266 #define ANEG_STATE_IDLE_DETECT_INIT     11
2267 #define ANEG_STATE_IDLE_DETECT          12
2268 #define ANEG_STATE_LINK_OK              13
2269 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2270 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2271
2272         u32 flags;
2273 #define MR_AN_ENABLE            0x00000001
2274 #define MR_RESTART_AN           0x00000002
2275 #define MR_AN_COMPLETE          0x00000004
2276 #define MR_PAGE_RX              0x00000008
2277 #define MR_NP_LOADED            0x00000010
2278 #define MR_TOGGLE_TX            0x00000020
2279 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2280 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2281 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2282 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2283 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2284 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2285 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2286 #define MR_TOGGLE_RX            0x00002000
2287 #define MR_NP_RX                0x00004000
2288
2289 #define MR_LINK_OK              0x80000000
2290
2291         unsigned long link_time, cur_time;
2292
2293         u32 ability_match_cfg;
2294         int ability_match_count;
2295
2296         char ability_match, idle_match, ack_match;
2297
2298         u32 txconfig, rxconfig;
2299 #define ANEG_CFG_NP             0x00000080
2300 #define ANEG_CFG_ACK            0x00000040
2301 #define ANEG_CFG_RF2            0x00000020
2302 #define ANEG_CFG_RF1            0x00000010
2303 #define ANEG_CFG_PS2            0x00000001
2304 #define ANEG_CFG_PS1            0x00008000
2305 #define ANEG_CFG_HD             0x00004000
2306 #define ANEG_CFG_FD             0x00002000
2307 #define ANEG_CFG_INVAL          0x00001f06
2308
2309 };
2310 #define ANEG_OK         0
2311 #define ANEG_DONE       1
2312 #define ANEG_TIMER_ENAB 2
2313 #define ANEG_FAILED     -1
2314
2315 #define ANEG_STATE_SETTLE_TIME  10000
2316
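/* Illustrative note (not part of the original file): the MR_* flags
 * mirror the management variables of the IEEE 802.3z Clause 37
 * auto-negotiation state machine, and the ANEG_CFG_* values appear to
 * be the Clause 37 base-page bits with the two bytes of the 16-bit
 * config word swapped, presumably matching the MAC_RX_AUTO_NEG
 * register layout: full duplex is bit 5 (0x0020) on the wire but
 * 0x2000 here.  A hypothetical decode of a received base page under
 * that reading:
 *
 *	u32 rxcfg = tr32(MAC_RX_AUTO_NEG);
 *	int lp_fd  = !!(rxcfg & ANEG_CFG_FD);	// partner full-duplex capable
 *	int lp_ps1 = !!(rxcfg & ANEG_CFG_PS1);	// partner symmetric pause
 */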
2317 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2318                                    struct tg3_fiber_aneginfo *ap)
2319 {
2320         unsigned long delta;
2321         u32 rx_cfg_reg;
2322         int ret;
2323
2324         if (ap->state == ANEG_STATE_UNKNOWN) {
2325                 ap->rxconfig = 0;
2326                 ap->link_time = 0;
2327                 ap->cur_time = 0;
2328                 ap->ability_match_cfg = 0;
2329                 ap->ability_match_count = 0;
2330                 ap->ability_match = 0;
2331                 ap->idle_match = 0;
2332                 ap->ack_match = 0;
2333         }
2334         ap->cur_time++;
2335
2336         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2337                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2338
2339                 if (rx_cfg_reg != ap->ability_match_cfg) {
2340                         ap->ability_match_cfg = rx_cfg_reg;
2341                         ap->ability_match = 0;
2342                         ap->ability_match_count = 0;
2343                 } else {
2344                         if (++ap->ability_match_count > 1) {
2345                                 ap->ability_match = 1;
2346                                 ap->ability_match_cfg = rx_cfg_reg;
2347                         }
2348                 }
2349                 if (rx_cfg_reg & ANEG_CFG_ACK)
2350                         ap->ack_match = 1;
2351                 else
2352                         ap->ack_match = 0;
2353
2354                 ap->idle_match = 0;
2355         } else {
2356                 ap->idle_match = 1;
2357                 ap->ability_match_cfg = 0;
2358                 ap->ability_match_count = 0;
2359                 ap->ability_match = 0;
2360                 ap->ack_match = 0;
2361
2362                 rx_cfg_reg = 0;
2363         }
2364
2365         ap->rxconfig = rx_cfg_reg;
2366         ret = ANEG_OK;
2367
2368         switch (ap->state) {
2369         case ANEG_STATE_UNKNOWN:
2370                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2371                         ap->state = ANEG_STATE_AN_ENABLE;
2372
2373                 /* fallthru */
2374         case ANEG_STATE_AN_ENABLE:
2375                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2376                 if (ap->flags & MR_AN_ENABLE) {
2377                         ap->link_time = 0;
2378                         ap->cur_time = 0;
2379                         ap->ability_match_cfg = 0;
2380                         ap->ability_match_count = 0;
2381                         ap->ability_match = 0;
2382                         ap->idle_match = 0;
2383                         ap->ack_match = 0;
2384
2385                         ap->state = ANEG_STATE_RESTART_INIT;
2386                 } else {
2387                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2388                 }
2389                 break;
2390
2391         case ANEG_STATE_RESTART_INIT:
2392                 ap->link_time = ap->cur_time;
2393                 ap->flags &= ~(MR_NP_LOADED);
2394                 ap->txconfig = 0;
2395                 tw32(MAC_TX_AUTO_NEG, 0);
2396                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2397                 tw32_f(MAC_MODE, tp->mac_mode);
2398                 udelay(40);
2399
2400                 ret = ANEG_TIMER_ENAB;
2401                 ap->state = ANEG_STATE_RESTART;
2402
2403                 /* fallthru */
2404         case ANEG_STATE_RESTART:
2405                 delta = ap->cur_time - ap->link_time;
2406                 if (delta > ANEG_STATE_SETTLE_TIME) {
2407                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2408                 } else {
2409                         ret = ANEG_TIMER_ENAB;
2410                 }
2411                 break;
2412
2413         case ANEG_STATE_DISABLE_LINK_OK:
2414                 ret = ANEG_DONE;
2415                 break;
2416
2417         case ANEG_STATE_ABILITY_DETECT_INIT:
2418                 ap->flags &= ~(MR_TOGGLE_TX);
2419                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2420                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2421                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2422                 tw32_f(MAC_MODE, tp->mac_mode);
2423                 udelay(40);
2424
2425                 ap->state = ANEG_STATE_ABILITY_DETECT;
2426                 break;
2427
2428         case ANEG_STATE_ABILITY_DETECT:
2429                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2430                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2431                 }
2432                 break;
2433
2434         case ANEG_STATE_ACK_DETECT_INIT:
2435                 ap->txconfig |= ANEG_CFG_ACK;
2436                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2437                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2438                 tw32_f(MAC_MODE, tp->mac_mode);
2439                 udelay(40);
2440
2441                 ap->state = ANEG_STATE_ACK_DETECT;
2442
2443                 /* fallthru */
2444         case ANEG_STATE_ACK_DETECT:
2445                 if (ap->ack_match != 0) {
2446                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2447                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2448                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2449                         } else {
2450                                 ap->state = ANEG_STATE_AN_ENABLE;
2451                         }
2452                 } else if (ap->ability_match != 0 &&
2453                            ap->rxconfig == 0) {
2454                         ap->state = ANEG_STATE_AN_ENABLE;
2455                 }
2456                 break;
2457
2458         case ANEG_STATE_COMPLETE_ACK_INIT:
2459                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2460                         ret = ANEG_FAILED;
2461                         break;
2462                 }
2463                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2464                                MR_LP_ADV_HALF_DUPLEX |
2465                                MR_LP_ADV_SYM_PAUSE |
2466                                MR_LP_ADV_ASYM_PAUSE |
2467                                MR_LP_ADV_REMOTE_FAULT1 |
2468                                MR_LP_ADV_REMOTE_FAULT2 |
2469                                MR_LP_ADV_NEXT_PAGE |
2470                                MR_TOGGLE_RX |
2471                                MR_NP_RX);
2472                 if (ap->rxconfig & ANEG_CFG_FD)
2473                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2474                 if (ap->rxconfig & ANEG_CFG_HD)
2475                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2476                 if (ap->rxconfig & ANEG_CFG_PS1)
2477                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2478                 if (ap->rxconfig & ANEG_CFG_PS2)
2479                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2480                 if (ap->rxconfig & ANEG_CFG_RF1)
2481                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2482                 if (ap->rxconfig & ANEG_CFG_RF2)
2483                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2484                 if (ap->rxconfig & ANEG_CFG_NP)
2485                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2486
2487                 ap->link_time = ap->cur_time;
2488
2489                 ap->flags ^= (MR_TOGGLE_TX);
2490                 if (ap->rxconfig & 0x0008)
2491                         ap->flags |= MR_TOGGLE_RX;
2492                 if (ap->rxconfig & ANEG_CFG_NP)
2493                         ap->flags |= MR_NP_RX;
2494                 ap->flags |= MR_PAGE_RX;
2495
2496                 ap->state = ANEG_STATE_COMPLETE_ACK;
2497                 ret = ANEG_TIMER_ENAB;
2498                 break;
2499
2500         case ANEG_STATE_COMPLETE_ACK:
2501                 if (ap->ability_match != 0 &&
2502                     ap->rxconfig == 0) {
2503                         ap->state = ANEG_STATE_AN_ENABLE;
2504                         break;
2505                 }
2506                 delta = ap->cur_time - ap->link_time;
2507                 if (delta > ANEG_STATE_SETTLE_TIME) {
2508                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2509                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2510                         } else {
2511                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2512                                     !(ap->flags & MR_NP_RX)) {
2513                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2514                                 } else {
2515                                         ret = ANEG_FAILED;
2516                                 }
2517                         }
2518                 }
2519                 break;
2520
2521         case ANEG_STATE_IDLE_DETECT_INIT:
2522                 ap->link_time = ap->cur_time;
2523                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2524                 tw32_f(MAC_MODE, tp->mac_mode);
2525                 udelay(40);
2526
2527                 ap->state = ANEG_STATE_IDLE_DETECT;
2528                 ret = ANEG_TIMER_ENAB;
2529                 break;
2530
2531         case ANEG_STATE_IDLE_DETECT:
2532                 if (ap->ability_match != 0 &&
2533                     ap->rxconfig == 0) {
2534                         ap->state = ANEG_STATE_AN_ENABLE;
2535                         break;
2536                 }
2537                 delta = ap->cur_time - ap->link_time;
2538                 if (delta > ANEG_STATE_SETTLE_TIME) {
2539                         /* XXX another gem from the Broadcom driver :( */
2540                         ap->state = ANEG_STATE_LINK_OK;
2541                 }
2542                 break;
2543
2544         case ANEG_STATE_LINK_OK:
2545                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2546                 ret = ANEG_DONE;
2547                 break;
2548
2549         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2550                 /* ??? unimplemented */
2551                 break;
2552
2553         case ANEG_STATE_NEXT_PAGE_WAIT:
2554                 /* ??? unimplemented */
2555                 break;
2556
2557         default:
2558                 ret = ANEG_FAILED;
2559                 break;
2560         }
2561
2562         return ret;
2563 }
2564
2565 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2566 {
2567         int res = 0;
2568         struct tg3_fiber_aneginfo aninfo;
2569         int status = ANEG_FAILED;
2570         unsigned int tick;
2571         u32 tmp;
2572
2573         tw32_f(MAC_TX_AUTO_NEG, 0);
2574
2575         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2576         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2577         udelay(40);
2578
2579         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2580         udelay(40);
2581
2582         memset(&aninfo, 0, sizeof(aninfo));
2583         aninfo.flags |= MR_AN_ENABLE;
2584         aninfo.state = ANEG_STATE_UNKNOWN;
2585         aninfo.cur_time = 0;
2586         tick = 0;
2587         while (++tick < 195000) {
2588                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2589                 if (status == ANEG_DONE || status == ANEG_FAILED)
2590                         break;
2591
2592                 udelay(1);
2593         }
2594
2595         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2596         tw32_f(MAC_MODE, tp->mac_mode);
2597         udelay(40);
2598
2599         *flags = aninfo.flags;
2600
2601         if (status == ANEG_DONE &&
2602             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2603                              MR_LP_ADV_FULL_DUPLEX)))
2604                 res = 1;
2605
2606         return res;
2607 }
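/* Illustrative note (not part of the original file): fiber_autoneg()
 * drives the state machine by polling, one tick per udelay(1), so the
 * 195000-tick cap gives auto-negotiation a budget of roughly 195 ms,
 * and each ANEG_STATE_SETTLE_TIME wait (10000 ticks) lasts about
 * 10 ms.  ANEG_TIMER_ENAB simply means "keep ticking"; only ANEG_DONE
 * or ANEG_FAILED break out of the loop early.
 */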
2608
2609 static void tg3_init_bcm8002(struct tg3 *tp)
2610 {
2611         u32 mac_status = tr32(MAC_STATUS);
2612         int i;
2613
2614         /* Reset on first-time init or when we have a link. */
2615         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2616             !(mac_status & MAC_STATUS_PCS_SYNCED))
2617                 return;
2618
2619         /* Set PLL lock range. */
2620         tg3_writephy(tp, 0x16, 0x8007);
2621
2622         /* SW reset */
2623         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2624
2625         /* Wait for reset to complete. */
2626         /* XXX schedule_timeout() ... */
2627         for (i = 0; i < 500; i++)
2628                 udelay(10);
2629
2630         /* Config mode; select PMA/Ch 1 regs. */
2631         tg3_writephy(tp, 0x10, 0x8411);
2632
2633         /* Enable auto-lock and comdet, select txclk for tx. */
2634         tg3_writephy(tp, 0x11, 0x0a10);
2635
2636         tg3_writephy(tp, 0x18, 0x00a0);
2637         tg3_writephy(tp, 0x16, 0x41ff);
2638
2639         /* Assert and deassert POR. */
2640         tg3_writephy(tp, 0x13, 0x0400);
2641         udelay(40);
2642         tg3_writephy(tp, 0x13, 0x0000);
2643
2644         tg3_writephy(tp, 0x11, 0x0a50);
2645         udelay(40);
2646         tg3_writephy(tp, 0x11, 0x0a10);
2647
2648         /* Wait for signal to stabilize */
2649         /* XXX schedule_timeout() ... */
2650         for (i = 0; i < 15000; i++)
2651                 udelay(10);
2652
2653         /* Deselect the channel register so we can read the PHYID
2654          * later.
2655          */
2656         tg3_writephy(tp, 0x10, 0x8011);
2657 }
2658
2659 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2660 {
2661         u32 sg_dig_ctrl, sg_dig_status;
2662         u32 serdes_cfg, expected_sg_dig_ctrl;
2663         int workaround, port_a;
2664         int current_link_up;
2665
2666         serdes_cfg = 0;
2667         expected_sg_dig_ctrl = 0;
2668         workaround = 0;
2669         port_a = 1;
2670         current_link_up = 0;
2671
2672         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2673             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2674                 workaround = 1;
2675                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2676                         port_a = 0;
2677
2678                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2679                 /* preserve bits 20-23 for voltage regulator */
2680                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2681         }
2682
2683         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2684
2685         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2686                 if (sg_dig_ctrl & (1 << 31)) {
2687                         if (workaround) {
2688                                 u32 val = serdes_cfg;
2689
2690                                 if (port_a)
2691                                         val |= 0xc010000;
2692                                 else
2693                                         val |= 0x4010000;
2694                                 tw32_f(MAC_SERDES_CFG, val);
2695                         }
2696                         tw32_f(SG_DIG_CTRL, 0x01388400);
2697                 }
2698                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2699                         tg3_setup_flow_control(tp, 0, 0);
2700                         current_link_up = 1;
2701                 }
2702                 goto out;
2703         }
2704
2705         /* Want auto-negotiation.  */
2706         expected_sg_dig_ctrl = 0x81388400;
2707
2708         /* Pause capability */
2709         expected_sg_dig_ctrl |= (1 << 11);
2710
2711         /* Asymmetric pause */
2712         expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			} else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
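
/* Worked example: with a 512-entry TX ring (TG3_TX_RING_SIZE) and
 * tx_pending == 511, tx_prod == 10 and tx_cons == 500 give
 * (10 - 500) & 511 == 22 descriptors in flight, so 511 - 22 == 489
 * slots are still available.  The smp_mb() above ensures callers see
 * an up-to-date tx_cons; it pairs with the barrier in tg3_tx() below.
 */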

/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}

/* We only need to move the address over because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}

#if TG3_VLAN_TAG_USED
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
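
/* For illustration, assume two posted rings: the standard ring with
 * buffers sized for ~1.5K frames and the jumbo ring with 9K buffers
 * (the exact MAXLEN values are chip configuration programmed
 * elsewhere in this driver).  A received 4000-byte frame exceeds the
 * standard ring's MAXLEN, so the chip walks on to the jumbo
 * TG3_BDINFO, consumes a buffer posted there, and reports the
 * completion in the status ring with an opaque cookie of
 * RXD_OPAQUE_RING_JUMBO, which tg3_rx() below uses to locate the
 * original buffer.
 */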
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		} else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* rx_offset != 2 iff this is a 5701 card running
		 * in PCI-X mode [see tg3_get_invariants()].
		 */
		if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
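
/* Mid-loop refill illustration: rx_std_max_post is a chip-dependent
 * limit established elsewhere in this driver.  If it were 32 (a
 * value picked purely for illustration), then after 32 standard-ring
 * buffers had been recycled or replaced in a single pass, tg3_rx()
 * above would write the standard producer mailbox early and clear
 * RXD_OPAQUE_RING_STD from work_mask, so the chip cannot starve for
 * buffers during a long burst.
 */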

static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
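
/* Budget accounting example: with a NAPI budget of 64, a burst of 80
 * pending frames makes tg3_rx() stop after 64, so tg3_poll_work()
 * returns work_done == 64.  Since work_done >= budget, tg3_poll()
 * breaks out without calling netif_rx_complete(), the poll stays
 * scheduled, and the remaining 16 frames are picked up on the next
 * softirq pass.
 */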

static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handler is quiesced as well.
 * Most of the time this is not necessary, except when shutting
 * down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
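
/* Typical usage, as in tg3_reset_task() below: pass a non-zero
 * irq_sync only when the hardware is about to be reconfigured or
 * halted, so that no ISR can still be running:
 *
 *	tg3_full_lock(tp, 1);
 *	... halt and reprogram the chip ...
 *	tg3_full_unlock(tp);
 */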

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}

static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
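
/* Worked example: a buffer mapped at 0xffffff00 with len == 0x200
 * satisfies base > 0xffffdcc0, and base + len + 8 wraps to 0x108 in
 * 32 bits, which is < base, so the test reports an overflow and the
 * buffer must be bounced (see tigon3_dma_hwbug_workaround() below).
 */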

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
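
/* Worked example (only meaningful with CONFIG_HIGHMEM on a 64-bit
 * build and TG3_FLAG_40BIT_DMA_BUG set): a mapping at 0xfffffff000
 * with len == 0x2000 ends at 0x10000001000, which exceeds
 * DMA_40BIT_MASK (0xffffffffff), so the test fires.
 */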

static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}

static void tg3_set_txd(struct tg3 *tp, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
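
/* Callers pack the "is_end" flag and the MSS into a single u32, e.g.
 * (i == last) | (mss << 1) in tg3_start_xmit() below.  For the final
 * fragment of a TSO frame with mss == 1448 that is
 * 1 | (1448 << 1) == 2897, which tg3_set_txd() above splits back into
 * is_end == 1 and mss == 1448.
 */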
4105
4106 /* hard_start_xmit for devices that don't have any bugs and
4107  * support TG3_FLG2_HW_TSO_2 only.
4108  */
4109 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4110 {
4111         struct tg3 *tp = netdev_priv(dev);
4112         dma_addr_t mapping;
4113         u32 len, entry, base_flags, mss;
4114
4115         len = skb_headlen(skb);
4116
4117         /* We are running in BH disabled context with netif_tx_lock
4118          * and TX reclaim runs via tp->napi.poll inside of a software
4119          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4120          * no IRQ context deadlocks to worry about either.  Rejoice!
4121          */
4122         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4123                 if (!netif_queue_stopped(dev)) {
4124                         netif_stop_queue(dev);
4125
4126                         /* This is a hard error, log it. */
4127                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4128                                "queue awake!\n", dev->name);
4129                 }
4130                 return NETDEV_TX_BUSY;
4131         }
4132
4133         entry = tp->tx_prod;
4134         base_flags = 0;
4135         mss = 0;
4136         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4137                 int tcp_opt_len, ip_tcp_len;
4138
4139                 if (skb_header_cloned(skb) &&
4140                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4141                         dev_kfree_skb(skb);
4142                         goto out_unlock;
4143                 }
4144
4145                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4146                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4147                 } else {
4148                         struct iphdr *iph = ip_hdr(skb);
4149
4150                         tcp_opt_len = tcp_optlen(skb);
4151                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4152
4153                         iph->check = 0;
4154                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4155                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4156                 }
4157
4158                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4159                                TXD_FLAG_CPU_POST_DMA);
4160
4161                 tcp_hdr(skb)->check = 0;
4164         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4165                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4166 #if TG3_VLAN_TAG_USED
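        /* The VLAN tag travels in the upper 16 bits of base_flags;
         * tg3_set_txd() splits it back out into the descriptor.
         */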
4167         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4168                 base_flags |= (TXD_FLAG_VLAN |
4169                                (vlan_tx_tag_get(skb) << 16));
4170 #endif
4171
4172         /* Queue skb data, a.k.a. the main skb fragment. */
4173         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4174
4175         tp->tx_buffers[entry].skb = skb;
4176         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4177
4178         tg3_set_txd(tp, entry, mapping, len, base_flags,
4179                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4180
4181         entry = NEXT_TX(entry);
4182
4183         /* Now loop through additional data fragments, and queue them. */
4184         if (skb_shinfo(skb)->nr_frags > 0) {
4185                 unsigned int i, last;
4186
4187                 last = skb_shinfo(skb)->nr_frags - 1;
4188                 for (i = 0; i <= last; i++) {
4189                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4190
4191                         len = frag->size;
4192                         mapping = pci_map_page(tp->pdev,
4193                                                frag->page,
4194                                                frag->page_offset,
4195                                                len, PCI_DMA_TODEVICE);
4196
4197                         tp->tx_buffers[entry].skb = NULL;
4198                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4199
4200                         tg3_set_txd(tp, entry, mapping, len,
4201                                     base_flags, (i == last) | (mss << 1));
4202
4203                         entry = NEXT_TX(entry);
4204                 }
4205         }
4206
4207         /* Packets are ready, update Tx producer idx local and on card. */
4208         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4209
4210         tp->tx_prod = entry;
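        /* Stop the queue once a maximally fragmented skb might no longer
         * fit, then re-check availability so a concurrent completion that
         * just freed descriptors cannot leave the queue stopped forever.
         */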
4211         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4212                 netif_stop_queue(dev);
4213                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4214                         netif_wake_queue(tp->dev);
4215         }
4216
4217 out_unlock:
4218         mmiowb();
4219
4220         dev->trans_start = jiffies;
4221
4222         return NETDEV_TX_OK;
4223 }
4224
4225 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4226
4227 /* Use GSO to work around a rare TSO bug that may be triggered when the
4228  * TSO header is greater than 80 bytes.
4229  */
4230 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4231 {
4232         struct sk_buff *segs, *nskb;
4233
4234         /* Estimate the number of fragments in the worst case */
4235         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4236                 netif_stop_queue(tp->dev);
4237                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4238                         return NETDEV_TX_BUSY;
4239
4240                 netif_wake_queue(tp->dev);
4241         }
4242
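        /* Segment the skb in software with TSO masked out of the feature
         * flags, then feed each resulting packet back through the
         * slow-path transmit routine.
         */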
4243         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4244         if (unlikely(IS_ERR(segs)))
4245                 goto tg3_tso_bug_end;
4246
4247         do {
4248                 nskb = segs;
4249                 segs = segs->next;
4250                 nskb->next = NULL;
4251                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4252         } while (segs);
4253
4254 tg3_tso_bug_end:
4255         dev_kfree_skb(skb);
4256
4257         return NETDEV_TX_OK;
4258 }
4259
4260 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4261  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4262  */
4263 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4264 {
4265         struct tg3 *tp = netdev_priv(dev);
4266         dma_addr_t mapping;
4267         u32 len, entry, base_flags, mss;
4268         int would_hit_hwbug;
4269
4270         len = skb_headlen(skb);
4271
4272         /* We are running in BH disabled context with netif_tx_lock
4273          * and TX reclaim runs via tp->napi.poll inside of a software
4274          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4275          * no IRQ context deadlocks to worry about either.  Rejoice!
4276          */
4277         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4278                 if (!netif_queue_stopped(dev)) {
4279                         netif_stop_queue(dev);
4280
4281                         /* This is a hard error, log it. */
4282                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4283                                "queue awake!\n", dev->name);
4284                 }
4285                 return NETDEV_TX_BUSY;
4286         }
4287
4288         entry = tp->tx_prod;
4289         base_flags = 0;
4290         if (skb->ip_summed == CHECKSUM_PARTIAL)
4291                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4292         mss = 0;
4293         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4294                 struct iphdr *iph;
4295                 int tcp_opt_len, ip_tcp_len, hdr_len;
4296
4297                 if (skb_header_cloned(skb) &&
4298                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4299                         dev_kfree_skb(skb);
4300                         goto out_unlock;
4301                 }
4302
4303                 tcp_opt_len = tcp_optlen(skb);
4304                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4305
4306                 hdr_len = ip_tcp_len + tcp_opt_len;
4307                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4308                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4309                         return tg3_tso_bug(tp, skb);
4310
4311                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4312                                TXD_FLAG_CPU_POST_DMA);
4313
4314                 iph = ip_hdr(skb);
4315                 iph->check = 0;
4316                 iph->tot_len = htons(mss + hdr_len);
4317                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4318                         tcp_hdr(skb)->check = 0;
4319                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4320                 } else
4321                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4322                                                                  iph->daddr, 0,
4323                                                                  IPPROTO_TCP,
4324                                                                  0);
4325
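                /* Encode the count of 32-bit IP and TCP option words for
                 * the TSO engine: HW_TSO parts and the 5705 carry it in
                 * the mss word, all other parts in base_flags.
                 */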
4326                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4327                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4328                         if (tcp_opt_len || iph->ihl > 5) {
4329                                 int tsflags;
4330
4331                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4332                                 mss |= (tsflags << 11);
4333                         }
4334                 } else {
4335                         if (tcp_opt_len || iph->ihl > 5) {
4336                                 int tsflags;
4337
4338                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4339                                 base_flags |= tsflags << 12;
4340                         }
4341                 }
4342         }
4343 #if TG3_VLAN_TAG_USED
4344         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4345                 base_flags |= (TXD_FLAG_VLAN |
4346                                (vlan_tx_tag_get(skb) << 16));
4347 #endif
4348
4349         /* Queue skb data, a.k.a. the main skb fragment. */
4350         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4351
4352         tp->tx_buffers[entry].skb = skb;
4353         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4354
4355         would_hit_hwbug = 0;
4356
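        /* Mappings that cross a 4GB boundary (or, for the fragments
         * below, fall outside a 40-bit DMA window) would trip the
         * hardware bug this path works around; flag them so the packet
         * can be relinearized into a fresh skb and remapped.
         */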
4357         if (tg3_4g_overflow_test(mapping, len))
4358                 would_hit_hwbug = 1;
4359
4360         tg3_set_txd(tp, entry, mapping, len, base_flags,
4361                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4362
4363         entry = NEXT_TX(entry);
4364
4365         /* Now loop through additional data fragments, and queue them. */
4366         if (skb_shinfo(skb)->nr_frags > 0) {
4367                 unsigned int i, last;
4368
4369                 last = skb_shinfo(skb)->nr_frags - 1;
4370                 for (i = 0; i <= last; i++) {
4371                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4372
4373                         len = frag->size;
4374                         mapping = pci_map_page(tp->pdev,
4375                                                frag->page,
4376                                                frag->page_offset,
4377                                                len, PCI_DMA_TODEVICE);
4378
4379                         tp->tx_buffers[entry].skb = NULL;
4380                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4381
4382                         if (tg3_4g_overflow_test(mapping, len))
4383                                 would_hit_hwbug = 1;
4384
4385                         if (tg3_40bit_overflow_test(tp, mapping, len))
4386                                 would_hit_hwbug = 1;
4387
4388                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4389                                 tg3_set_txd(tp, entry, mapping, len,
4390                                             base_flags, (i == last)|(mss << 1));
4391                         else
4392                                 tg3_set_txd(tp, entry, mapping, len,
4393                                             base_flags, (i == last));
4394
4395                         entry = NEXT_TX(entry);
4396                 }
4397         }
4398
4399         if (would_hit_hwbug) {
4400                 u32 last_plus_one = entry;
4401                 u32 start;
4402
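                /* Walk back to this skb's first descriptor: one entry
                 * for the linear head plus one per page fragment, with
                 * the mask handling ring wrap-around.
                 */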
4403                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4404                 start &= (TG3_TX_RING_SIZE - 1);
4405
4406                 /* If the workaround fails due to memory/mapping
4407                  * failure, silently drop this packet.
4408                  */
4409                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4410                                                 &start, base_flags, mss))
4411                         goto out_unlock;
4412
4413                 entry = start;
4414         }
4415
4416         /* Packets are ready, update Tx producer idx local and on card. */
4417         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4418
4419         tp->tx_prod = entry;
4420         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4421                 netif_stop_queue(dev);
4422                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4423                         netif_wake_queue(tp->dev);
4424         }
4425
4426 out_unlock:
4427         mmiowb();
4428
4429         dev->trans_start = jiffies;
4430
4431         return NETDEV_TX_OK;
4432 }
4433
4434 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4435                                int new_mtu)
4436 {
4437         dev->mtu = new_mtu;
4438
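        /* 5780-class parts never enable the jumbo RX ring here; they
         * trade TSO capability off against a jumbo MTU instead, since
         * the code treats the two as mutually exclusive on that hardware.
         */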
4439         if (new_mtu > ETH_DATA_LEN) {
4440                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4441                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4442                         ethtool_op_set_tso(dev, 0);
4443                 } else
4445                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4446         } else {
4447                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4448                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4449                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4450         }
4451 }
4452
4453 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4454 {
4455         struct tg3 *tp = netdev_priv(dev);
4456         int err;
4457
4458         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4459                 return -EINVAL;
4460
4461         if (!netif_running(dev)) {
4462                 /* We'll just catch it later when the
4463                  * device is brought up.
4464                  */
4465                 tg3_set_mtu(dev, tp, new_mtu);
4466                 return 0;
4467         }
4468
4469         tg3_netif_stop(tp);
4470
4471         tg3_full_lock(tp, 1);
4472
4473         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4474
4475         tg3_set_mtu(dev, tp, new_mtu);
4476
4477         err = tg3_restart_hw(tp, 0);
4478
4479         if (!err)
4480                 tg3_netif_start(tp);
4481
4482         tg3_full_unlock(tp);
4483
4484         return err;
4485 }
4486
4487 /* Free up pending packets in all rx/tx rings.
4488  *
4489  * The chip has been shut down and the driver detached from
4490  * the networking stack, so no interrupts or new tx packets will
4491  * end up in the driver.  tp->{tx,}lock is not held and we are not
4492  * in an interrupt context and thus may sleep.
4493  */
4494 static void tg3_free_rings(struct tg3 *tp)
4495 {
4496         struct ring_info *rxp;
4497         int i;
4498
4499         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4500                 rxp = &tp->rx_std_buffers[i];
4501
4502                 if (rxp->skb == NULL)
4503                         continue;
4504                 pci_unmap_single(tp->pdev,
4505                                  pci_unmap_addr(rxp, mapping),
4506                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4507                                  PCI_DMA_FROMDEVICE);
4508                 dev_kfree_skb_any(rxp->skb);
4509                 rxp->skb = NULL;
4510         }
4511
4512         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4513                 rxp = &tp->rx_jumbo_buffers[i];
4514
4515                 if (rxp->skb == NULL)
4516                         continue;
4517                 pci_unmap_single(tp->pdev,
4518                                  pci_unmap_addr(rxp, mapping),
4519                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4520                                  PCI_DMA_FROMDEVICE);
4521                 dev_kfree_skb_any(rxp->skb);
4522                 rxp->skb = NULL;
4523         }
4524
4525         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4526                 struct tx_ring_info *txp;
4527                 struct sk_buff *skb;
4528                 int j;
4529
4530                 txp = &tp->tx_buffers[i];
4531                 skb = txp->skb;
4532
4533                 if (skb == NULL) {
4534                         i++;
4535                         continue;
4536                 }
4537
4538                 pci_unmap_single(tp->pdev,
4539                                  pci_unmap_addr(txp, mapping),
4540                                  skb_headlen(skb),
4541                                  PCI_DMA_TODEVICE);
4542                 txp->skb = NULL;
4543
4544                 i++;
4545
4546                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4547                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4548                         pci_unmap_page(tp->pdev,
4549                                        pci_unmap_addr(txp, mapping),
4550                                        skb_shinfo(skb)->frags[j].size,
4551                                        PCI_DMA_TODEVICE);
4552                         i++;
4553                 }
4554
4555                 dev_kfree_skb_any(skb);
4556         }
4557 }
4558
4559 /* Initialize tx/rx rings for packet processing.
4560  *
4561  * The chip has been shut down and the driver detached from
4562  * the networking stack, so no interrupts or new tx packets will
4563  * end up in the driver.  tp->{tx,}lock are held and thus
4564  * we may not sleep.
4565  */
4566 static int tg3_init_rings(struct tg3 *tp)
4567 {
4568         u32 i;
4569
4570         /* Free up all the SKBs. */
4571         tg3_free_rings(tp);
4572
4573         /* Zero out all descriptors. */
4574         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4575         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4576         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4577         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4578
4579         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4580         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4581             (tp->dev->mtu > ETH_DATA_LEN))
4582                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4583
4584         /* Initialize invariants of the rings; we only set this
4585          * stuff once.  This works because the card does not
4586          * write into the rx buffer posting rings.
4587          */
4588         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4589                 struct tg3_rx_buffer_desc *rxd;
4590
4591                 rxd = &tp->rx_std[i];
4592                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4593                         << RXD_LEN_SHIFT;
4594                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4595                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4596                                (i << RXD_OPAQUE_INDEX_SHIFT));
4597         }
4598
4599         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4600                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4601                         struct tg3_rx_buffer_desc *rxd;
4602
4603                         rxd = &tp->rx_jumbo[i];
4604                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4605                                 << RXD_LEN_SHIFT;
4606                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4607                                 RXD_FLAG_JUMBO;
4608                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4609                                (i << RXD_OPAQUE_INDEX_SHIFT));
4610                 }
4611         }
4612
4613         /* Now allocate fresh SKBs for each rx ring. */
4614         for (i = 0; i < tp->rx_pending; i++) {
4615                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4616                         printk(KERN_WARNING PFX
4617                                "%s: Using a smaller RX standard ring, "
4618                                "only %d out of %d buffers were allocated "
4619                                "successfully.\n",
4620                                tp->dev->name, i, tp->rx_pending);
4621                         if (i == 0)
4622                                 return -ENOMEM;
4623                         tp->rx_pending = i;
4624                         break;
4625                 }
4626         }
4627
4628         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4629                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4630                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4631                                              -1, i) < 0) {
4632                                 printk(KERN_WARNING PFX
4633                                        "%s: Using a smaller RX jumbo ring, "
4634                                        "only %d out of %d buffers were "
4635                                        "allocated successfully.\n",
4636                                        tp->dev->name, i, tp->rx_jumbo_pending);
4637                                 if (i == 0) {
4638                                         tg3_free_rings(tp);
4639                                         return -ENOMEM;
4640                                 }
4641                                 tp->rx_jumbo_pending = i;
4642                                 break;
4643                         }
4644                 }
4645         }
4646         return 0;
4647 }
4648
4649 /*
4650  * Must not be invoked with interrupt sources disabled and
4651  * the hardware shut down.
4652  */
4653 static void tg3_free_consistent(struct tg3 *tp)
4654 {
4655         kfree(tp->rx_std_buffers);
4656         tp->rx_std_buffers = NULL;
4657         if (tp->rx_std) {
4658                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4659                                     tp->rx_std, tp->rx_std_mapping);
4660                 tp->rx_std = NULL;
4661         }
4662         if (tp->rx_jumbo) {
4663                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4664                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4665                 tp->rx_jumbo = NULL;
4666         }
4667         if (tp->rx_rcb) {
4668                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4669                                     tp->rx_rcb, tp->rx_rcb_mapping);
4670                 tp->rx_rcb = NULL;
4671         }
4672         if (tp->tx_ring) {
4673                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4674                         tp->tx_ring, tp->tx_desc_mapping);
4675                 tp->tx_ring = NULL;
4676         }
4677         if (tp->hw_status) {
4678                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4679                                     tp->hw_status, tp->status_mapping);
4680                 tp->hw_status = NULL;
4681         }
4682         if (tp->hw_stats) {
4683                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4684                                     tp->hw_stats, tp->stats_mapping);
4685                 tp->hw_stats = NULL;
4686         }
4687 }
4688
4689 /*
4690  * Must not be invoked with interrupt sources disabled and
4691  * the hardware shut down.  Can sleep.
4692  */
4693 static int tg3_alloc_consistent(struct tg3 *tp)
4694 {
4695         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4696                                       (TG3_RX_RING_SIZE +
4697                                        TG3_RX_JUMBO_RING_SIZE)) +
4698                                      (sizeof(struct tx_ring_info) *
4699                                       TG3_TX_RING_SIZE),
4700                                      GFP_KERNEL);
4701         if (!tp->rx_std_buffers)
4702                 return -ENOMEM;
4703
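        /* A single allocation backs all three software ring-info arrays:
         * standard RX entries first, then jumbo RX, then TX.
         */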
4704         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4705         tp->tx_buffers = (struct tx_ring_info *)
4706                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4707
4708         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4709                                           &tp->rx_std_mapping);
4710         if (!tp->rx_std)
4711                 goto err_out;
4712
4713         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4714                                             &tp->rx_jumbo_mapping);
4715
4716         if (!tp->rx_jumbo)
4717                 goto err_out;
4718
4719         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4720                                           &tp->rx_rcb_mapping);
4721         if (!tp->rx_rcb)
4722                 goto err_out;
4723
4724         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4725                                            &tp->tx_desc_mapping);
4726         if (!tp->tx_ring)
4727                 goto err_out;
4728
4729         tp->hw_status = pci_alloc_consistent(tp->pdev,
4730                                              TG3_HW_STATUS_SIZE,
4731                                              &tp->status_mapping);
4732         if (!tp->hw_status)
4733                 goto err_out;
4734
4735         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4736                                             sizeof(struct tg3_hw_stats),
4737                                             &tp->stats_mapping);
4738         if (!tp->hw_stats)
4739                 goto err_out;
4740
4741         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4742         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4743
4744         return 0;
4745
4746 err_out:
4747         tg3_free_consistent(tp);
4748         return -ENOMEM;
4749 }
4750
4751 #define MAX_WAIT_CNT 1000
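/* Each polling loop below waits at most MAX_WAIT_CNT * 100 usec = 100 msec. */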
4752
4753 /* To stop a block, clear the enable bit and poll till it
4754  * clears.  tp->lock is held.
4755  */
4756 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4757 {
4758         unsigned int i;
4759         u32 val;
4760
4761         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4762                 switch (ofs) {
4763                 case RCVLSC_MODE:
4764                 case DMAC_MODE:
4765                 case MBFREE_MODE:
4766                 case BUFMGR_MODE:
4767                 case MEMARB_MODE:
4768                         /* We can't enable/disable these bits of the
4769                          * 5705/5750, just say success.
4770                          */
4771                         return 0;
4772
4773                 default:
4774                         break;
4775                 }
4776         }
4777
4778         val = tr32(ofs);
4779         val &= ~enable_bit;
4780         tw32_f(ofs, val);
4781
4782         for (i = 0; i < MAX_WAIT_CNT; i++) {
4783                 udelay(100);
4784                 val = tr32(ofs);
4785                 if ((val & enable_bit) == 0)
4786                         break;
4787         }
4788
4789         if (i == MAX_WAIT_CNT && !silent) {
4790                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4791                        "ofs=%lx enable_bit=%x\n",
4792                        ofs, enable_bit);
4793                 return -ENODEV;
4794         }
4795
4796         return 0;
4797 }
4798
4799 /* tp->lock is held. */
4800 static int tg3_abort_hw(struct tg3 *tp, int silent)
4801 {
4802         int i, err;
4803
4804         tg3_disable_ints(tp);
4805
4806         tp->rx_mode &= ~RX_MODE_ENABLE;
4807         tw32_f(MAC_RX_MODE, tp->rx_mode);
4808         udelay(10);
4809
4810         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4811         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4812         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4813         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4814         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4815         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4816
4817         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4818         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4819         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4820         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4821         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4822         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4823         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4824
4825         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4826         tw32_f(MAC_MODE, tp->mac_mode);
4827         udelay(40);
4828
4829         tp->tx_mode &= ~TX_MODE_ENABLE;
4830         tw32_f(MAC_TX_MODE, tp->tx_mode);
4831
4832         for (i = 0; i < MAX_WAIT_CNT; i++) {
4833                 udelay(100);
4834                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4835                         break;
4836         }
4837         if (i >= MAX_WAIT_CNT) {
4838                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4839                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4840                        tp->dev->name, tr32(MAC_TX_MODE));
4841                 err |= -ENODEV;
4842         }
4843
4844         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4845         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4846         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4847
4848         tw32(FTQ_RESET, 0xffffffff);
4849         tw32(FTQ_RESET, 0x00000000);
4850
4851         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4852         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4853
4854         if (tp->hw_status)
4855                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4856         if (tp->hw_stats)
4857                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4858
4859         return err;
4860 }
4861
4862 /* tp->lock is held. */
4863 static int tg3_nvram_lock(struct tg3 *tp)
4864 {
4865         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4866                 int i;
4867
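                /* Request software arbitration of the NVRAM interface and
                 * poll for the grant for up to 8000 * 20 usec = 160 msec,
                 * backing the request out on timeout.
                 */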
4868                 if (tp->nvram_lock_cnt == 0) {
4869                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4870                         for (i = 0; i < 8000; i++) {
4871                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4872                                         break;
4873                                 udelay(20);
4874                         }
4875                         if (i == 8000) {
4876                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4877                                 return -ENODEV;
4878                         }
4879                 }
4880                 tp->nvram_lock_cnt++;
4881         }
4882         return 0;
4883 }
4884
4885 /* tp->lock is held. */
4886 static void tg3_nvram_unlock(struct tg3 *tp)
4887 {
4888         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4889                 if (tp->nvram_lock_cnt > 0)
4890                         tp->nvram_lock_cnt--;
4891                 if (tp->nvram_lock_cnt == 0)
4892                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4893         }
4894 }
4895
4896 /* tp->lock is held. */
4897 static void tg3_enable_nvram_access(struct tg3 *tp)
4898 {
4899         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4900             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4901                 u32 nvaccess = tr32(NVRAM_ACCESS);
4902
4903                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4904         }
4905 }
4906
4907 /* tp->lock is held. */
4908 static void tg3_disable_nvram_access(struct tg3 *tp)
4909 {
4910         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4911             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4912                 u32 nvaccess = tr32(NVRAM_ACCESS);
4913
4914                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4915         }
4916 }
4917
4918 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4919 {
4920         int i;
4921         u32 apedata;
4922
4923         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4924         if (apedata != APE_SEG_SIG_MAGIC)
4925                 return;
4926
4927         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4928         if (apedata != APE_FW_STATUS_READY)
4929                 return;
4930
4931         /* Wait for up to 1 millisecond for the APE to service the previous event. */
4932         for (i = 0; i < 10; i++) {
4933                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4934                         return;
4935
4936                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4937
4938                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4939                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4940                                         event | APE_EVENT_STATUS_EVENT_PENDING);
4941
4942                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4943
4944                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4945                         break;
4946
4947                 udelay(100);
4948         }
4949
4950         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4951                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4952 }
4953
4954 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4955 {
4956         u32 event;
4957         u32 apedata;
4958
4959         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4960                 return;
4961
4962         switch (kind) {
4963                 case RESET_KIND_INIT:
4964                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4965                                         APE_HOST_SEG_SIG_MAGIC);
4966                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4967                                         APE_HOST_SEG_LEN_MAGIC);
4968                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4969                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4970                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4971                                         APE_HOST_DRIVER_ID_MAGIC);
4972                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4973                                         APE_HOST_BEHAV_NO_PHYLOCK);
4974
4975                         event = APE_EVENT_STATUS_STATE_START;
4976                         break;
4977                 case RESET_KIND_SHUTDOWN:
4978                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4979                         break;
4980                 case RESET_KIND_SUSPEND:
4981                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4982                         break;
4983                 default:
4984                         return;
4985         }
4986
4987         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4988
4989         tg3_ape_send_event(tp, event);
4990 }
4991
4992 /* tp->lock is held. */
4993 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4994 {
4995         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4996                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4997
4998         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4999                 switch (kind) {
5000                 case RESET_KIND_INIT:
5001                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5002                                       DRV_STATE_START);
5003                         break;
5004
5005                 case RESET_KIND_SHUTDOWN:
5006                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5007                                       DRV_STATE_UNLOAD);
5008                         break;
5009
5010                 case RESET_KIND_SUSPEND:
5011                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5012                                       DRV_STATE_SUSPEND);
5013                         break;
5014
5015                 default:
5016                         break;
5017                 }
5018         }
5019
5020         if (kind == RESET_KIND_INIT ||
5021             kind == RESET_KIND_SUSPEND)
5022                 tg3_ape_driver_state_change(tp, kind);
5023 }
5024
5025 /* tp->lock is held. */
5026 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5027 {
5028         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5029                 switch (kind) {
5030                 case RESET_KIND_INIT:
5031                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5032                                       DRV_STATE_START_DONE);
5033                         break;
5034
5035                 case RESET_KIND_SHUTDOWN:
5036                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5037                                       DRV_STATE_UNLOAD_DONE);
5038                         break;
5039
5040                 default:
5041                         break;
5042                 }
5043         }
5044
5045         if (kind == RESET_KIND_SHUTDOWN)
5046                 tg3_ape_driver_state_change(tp, kind);
5047 }
5048
5049 /* tp->lock is held. */
5050 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5051 {
5052         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5053                 switch (kind) {
5054                 case RESET_KIND_INIT:
5055                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5056                                       DRV_STATE_START);
5057                         break;
5058
5059                 case RESET_KIND_SHUTDOWN:
5060                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5061                                       DRV_STATE_UNLOAD);
5062                         break;
5063
5064                 case RESET_KIND_SUSPEND:
5065                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5066                                       DRV_STATE_SUSPEND);
5067                         break;
5068
5069                 default:
5070                         break;
5071                 }
5072         }
5073 }
5074
5075 static int tg3_poll_fw(struct tg3 *tp)
5076 {
5077         int i;
5078         u32 val;
5079
5080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5081                 /* Wait up to 20ms for init done. */
5082                 for (i = 0; i < 200; i++) {
5083                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5084                                 return 0;
5085                         udelay(100);
5086                 }
5087                 return -ENODEV;
5088         }
5089
5090         /* Wait up to one second for firmware initialization to complete. */
5091         for (i = 0; i < 100000; i++) {
5092                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5093                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5094                         break;
5095                 udelay(10);
5096         }
5097
5098         /* Chip might not be fitted with firmware.  Some Sun onboard
5099          * parts are configured like that.  So don't signal the timeout
5100          * of the above loop as an error, but do report the lack of
5101          * running firmware once.
5102          */
5103         if (i >= 100000 &&
5104             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5105                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5106
5107                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5108                        tp->dev->name);
5109         }
5110
5111         return 0;
5112 }
5113
5114 /* Save PCI command register before chip reset */
5115 static void tg3_save_pci_state(struct tg3 *tp)
5116 {
5117         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5118 }
5119
5120 /* Restore PCI state after chip reset */
5121 static void tg3_restore_pci_state(struct tg3 *tp)
5122 {
5123         u32 val;
5124
5125         /* Re-enable indirect register accesses. */
5126         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5127                                tp->misc_host_ctrl);
5128
5129         /* Set MAX PCI retry to zero. */
5130         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5131         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5132             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5133                 val |= PCISTATE_RETRY_SAME_DMA;
5134         /* Allow reads and writes to the APE register and memory space. */
5135         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5136                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5137                        PCISTATE_ALLOW_APE_SHMEM_WR;
5138         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5139
5140         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5141
5142         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5143                 pcie_set_readrq(tp->pdev, 4096);
5144         else {
5145                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5146                                       tp->pci_cacheline_sz);
5147                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5148                                       tp->pci_lat_timer);
5149         }
5150
5151         /* Make sure PCI-X relaxed ordering bit is clear. */
5152         if (tp->pcix_cap) {
5153                 u16 pcix_cmd;
5154
5155                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5156                                      &pcix_cmd);
5157                 pcix_cmd &= ~PCI_X_CMD_ERO;
5158                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5159                                       pcix_cmd);
5160         }
5161
5162         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5163
5164                 /* Chip reset on 5780 will reset MSI enable bit,
5165                  * so we need to restore it.
5166                  */
5167                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5168                         u16 ctrl;
5169
5170                         pci_read_config_word(tp->pdev,
5171                                              tp->msi_cap + PCI_MSI_FLAGS,
5172                                              &ctrl);
5173                         pci_write_config_word(tp->pdev,
5174                                               tp->msi_cap + PCI_MSI_FLAGS,
5175                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5176                         val = tr32(MSGINT_MODE);
5177                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5178                 }
5179         }
5180 }
5181
5182 static void tg3_stop_fw(struct tg3 *);
5183
5184 /* tp->lock is held. */
5185 static int tg3_chip_reset(struct tg3 *tp)
5186 {
5187         u32 val;
5188         void (*write_op)(struct tg3 *, u32, u32);
5189         int err;
5190
5191         tg3_nvram_lock(tp);
5192
5193         /* No matching tg3_nvram_unlock() after this because
5194          * the chip reset below will undo the nvram lock.
5195          */
5196         tp->nvram_lock_cnt = 0;
5197
5198         /* GRC_MISC_CFG core clock reset will clear the memory
5199          * enable bit in PCI register 4 and the MSI enable bit
5200          * on some chips, so we save relevant registers here.
5201          */
5202         tg3_save_pci_state(tp);
5203
5204         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5205             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5206             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5207             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5208             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5209                 tw32(GRC_FASTBOOT_PC, 0);
5210
5211         /*
5212          * We must avoid the readl() that normally takes place.
5213          * It locks up machines, causes machine checks, and other
5214          * fun things.  So temporarily disable the 5701
5215          * hardware workaround while we do the reset.
5216          */
5217         write_op = tp->write32;
5218         if (write_op == tg3_write_flush_reg32)
5219                 tp->write32 = tg3_write32;
5220
5221         /* Prevent the irq handler from reading or writing PCI registers
5222          * during chip reset when the memory enable bit in the PCI command
5223          * register may be cleared.  The chip does not generate interrupt
5224          * at this time, but the irq handler may still be called due to irq
5225          * sharing or irqpoll.
5226          */
5227         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5228         if (tp->hw_status) {
5229                 tp->hw_status->status = 0;
5230                 tp->hw_status->status_tag = 0;
5231         }
5232         tp->last_tag = 0;
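        /* Make the flag and status block writes visible before waiting
         * for any interrupt handler already in flight to finish.
         */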
5233         smp_mb();
5234         synchronize_irq(tp->pdev->irq);
5235
5236         /* do the reset */
5237         val = GRC_MISC_CFG_CORECLK_RESET;
5238
5239         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5240                 if (tr32(0x7e2c) == 0x60) {
5241                         tw32(0x7e2c, 0x20);
5242                 }
5243                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5244                         tw32(GRC_MISC_CFG, (1 << 29));
5245                         val |= (1 << 29);
5246                 }
5247         }
5248
5249         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5250                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5251                 tw32(GRC_VCPU_EXT_CTRL,
5252                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5253         }
5254
5255         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5256                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5257         tw32(GRC_MISC_CFG, val);
5258
5259         /* restore 5701 hardware bug workaround write method */
5260         tp->write32 = write_op;
5261
5262         /* Unfortunately, we have to delay before the PCI read back.
5263          * Some 575X chips will not even respond to a PCI cfg access
5264          * when the reset command is given to the chip.
5265          *
5266          * How do these hardware designers expect things to work
5267          * properly if the PCI write is posted for a long period
5268          * of time?  It is always necessary to have some method by
5269          * which a register read back can occur to push the write
5270          * out which does the reset.
5271          *
5272          * For most tg3 variants the trick below has worked.
5273          * Ho hum...
5274          */
5275         udelay(120);
5276
5277         /* Flush PCI posted writes.  The normal MMIO registers
5278          * are inaccessible at this time, so this is the only
5279          * way to do this reliably (actually, this is no longer
5280          * the case, see above).  I tried to use indirect
5281          * register read/write but this upset some 5701 variants.
5282          */
5283         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5284
5285         udelay(120);
5286
5287         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5288                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5289                         int i;
5290                         u32 cfg_val;
5291
5292                         /* Wait for link training to complete.  */
5293                         for (i = 0; i < 5000; i++)
5294                                 udelay(100);
5295
5296                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5297                         pci_write_config_dword(tp->pdev, 0xc4,
5298                                                cfg_val | (1 << 15));
5299                 }
5300                 /* Set PCIE max payload size and clear error status.  */
5301                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5302         }
5303
5304         tg3_restore_pci_state(tp);
5305
5306         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5307
5308         val = 0;
5309         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5310                 val = tr32(MEMARB_MODE);
5311         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5312
5313         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5314                 tg3_stop_fw(tp);
5315                 tw32(0x5000, 0x400);
5316         }
5317
5318         tw32(GRC_MODE, tp->grc_mode);
5319
5320         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5321                 val = tr32(0xc4);
5322
5323                 tw32(0xc4, val | (1 << 15));
5324         }
5325
5326         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5327             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5328                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5329                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5330                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5331                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5332         }
5333
5334         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5335                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5336                 tw32_f(MAC_MODE, tp->mac_mode);
5337         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5338                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5339                 tw32_f(MAC_MODE, tp->mac_mode);
5340         } else
5341                 tw32_f(MAC_MODE, 0);
5342         udelay(40);
5343
5344         err = tg3_poll_fw(tp);
5345         if (err)
5346                 return err;
5347
5348         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5349             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5350                 val = tr32(0x7c00);
5351
5352                 tw32(0x7c00, val | (1 << 25));
5353         }
5354
5355         /* Reprobe ASF enable state.  */
5356         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5357         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5358         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5359         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5360                 u32 nic_cfg;
5361
5362                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5363                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5364                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5365                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5366                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5367                 }
5368         }
5369
5370         return 0;
5371 }
5372
5373 /* tp->lock is held. */
5374 static void tg3_stop_fw(struct tg3 *tp)
5375 {
5376         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5377            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5378                 u32 val;
5379                 int i;
5380
5381                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5382                 val = tr32(GRC_RX_CPU_EVENT);
5383                 val |= (1 << 14);
5384                 tw32(GRC_RX_CPU_EVENT, val);
5385
5386                 /* Wait for RX cpu to ACK the event.  */
5387                 for (i = 0; i < 100; i++) {
5388                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5389                                 break;
5390                         udelay(1);
5391                 }
5392         }
5393 }
5394
5395 /* tp->lock is held. */
5396 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5397 {
5398         int err;
5399
5400         tg3_stop_fw(tp);
5401
5402         tg3_write_sig_pre_reset(tp, kind);
5403
5404         tg3_abort_hw(tp, silent);
5405         err = tg3_chip_reset(tp);
5406
5407         tg3_write_sig_legacy(tp, kind);
5408         tg3_write_sig_post_reset(tp, kind);
5409
5410         if (err)
5411                 return err;
5412
5413         return 0;
5414 }
5415
5416 #define TG3_FW_RELEASE_MAJOR    0x0
5417 #define TG3_FW_RELEASE_MINOR    0x0
5418 #define TG3_FW_RELEASE_FIX      0x0
5419 #define TG3_FW_START_ADDR       0x08000000
5420 #define TG3_FW_TEXT_ADDR        0x08000000
5421 #define TG3_FW_TEXT_LEN         0x9c0
5422 #define TG3_FW_RODATA_ADDR      0x080009c0
5423 #define TG3_FW_RODATA_LEN       0x60
5424 #define TG3_FW_DATA_ADDR        0x08000a40
5425 #define TG3_FW_DATA_LEN         0x20
5426 #define TG3_FW_SBSS_ADDR        0x08000a60
5427 #define TG3_FW_SBSS_LEN         0xc
5428 #define TG3_FW_BSS_ADDR         0x08000a70
5429 #define TG3_FW_BSS_LEN          0x10
5430
5431 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5432         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5433         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5434         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5435         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5436         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
        0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
        0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
        0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
        0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
        0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
        0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
        0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
        0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
        0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
        0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
        0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
        0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
        0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
        0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
        0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
        0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
        0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
        0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
        0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
        0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
        0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
        0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
        0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
        0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
        0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
        0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
        0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
        0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
        0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
        0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
        0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
        0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
        0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
        0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
        0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
        0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
        0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
        0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
        0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
        0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
        0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
        0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
        0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
        0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
        0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
        0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
        0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
        0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
        0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
        0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
        0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
        0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
        0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
        0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
        0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
        0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
        0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
        0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
        0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
        0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
        0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};

static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
        0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
        0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
        0x00000000
};
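
/* tg3FwText above holds the firmware's executable (MIPS) text
 * segment; the tg3FwRodata words decode as big-endian ASCII tags
 * ("SwEvent0", "UnknEvnt", "fatalErr", "MainCpuB"), evidently event
 * and error names used by the firmware itself.
 */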

#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
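
/* Each on-chip CPU runs out of a 16 kB (0x4000-byte) scratch window
 * at the base given above; the loader below stages the firmware
 * sections there while the CPU is held in halt.
 */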

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        BUG_ON(offset == TX_CPU_BASE &&
               (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                /* The 5906 has a single virtual CPU, halted through
                 * the GRC VCPU control register instead.
                 */
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* The RX CPU gets one final halt request, flushed
                 * with a read-back and a short delay.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        if (i >= 10000) {
                printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s "
                       "(%s CPU)\n",
                       tp->dev->name,
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tp->tg3_flags & TG3_FLAG_NVRAM)
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}

struct fw_info {
        unsigned int text_base;
        unsigned int text_len;
        const u32 *text_data;
        unsigned int rodata_base;
        unsigned int rodata_len;
        const u32 *rodata_data;
        unsigned int data_base;
        unsigned int data_len;
        const u32 *data_data;
};
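
/* A fw_info describes one firmware image as three sections (text,
 * read-only data, initialized data), each with its link-time base
 * address, its length in bytes and a pointer to its contents.  A
 * NULL *_data pointer stands for an all-zero section, which is how
 * the elided tg3FwData array above is handled.
 */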

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
{
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);

        if (cpu_base == TX_CPU_BASE &&
            (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
                       "TX cpu firmware on %s, a 5705 or newer chip.\n",
                       tp->dev->name);
                return -EINVAL;
        }

        /* 5705 and newer chips reach the scratch area through the
         * memory window; older chips can use indirect register writes.
         */
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock before halting the cpu.
         */
        lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
        if (!lock_err)
                tg3_nvram_unlock(tp);
        if (err)
                goto out;

        /* Clear the scratch area, then copy each firmware section to
         * its place in the scratch window while the CPU stays halted.
         * A NULL section pointer stands for an all-zero section.
         */
        for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                write_op(tp, cpu_scratch_base + i, 0);
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
        for (i = 0; i < (info->text_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->text_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->text_data ?
                          info->text_data[i] : 0));
        for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->rodata_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->rodata_data ?
                          info->rodata_data[i] : 0));
        for (i = 0; i < (info->data_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->data_base & 0xffff) +
                              (i * sizeof(u32))),
                         (info->data_data ?
                          info->data_data[i] : 0));

        err = 0;

out:
        return err;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        int err, i;

        info.text_base = TG3_FW_TEXT_ADDR;
        info.text_len = TG3_FW_TEXT_LEN;
        info.text_data = &tg3FwText[0];
        info.rodata_base = TG3_FW_RODATA_ADDR;
        info.rodata_len = TG3_FW_RODATA_LEN;
        info.rodata_data = &tg3FwRodata[0];
        info.data_base = TG3_FW_DATA_ADDR;
        info.data_len = TG3_FW_DATA_LEN;
        info.data_data = NULL;

        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now start up only the RX CPU. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_firmware failed for %s: "
                       "RX CPU PC is %08x, should be %08x\n",
                       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
                       TG3_FW_TEXT_ADDR);
                return -ENODEV;
        }
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
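
/* The set-PC-and-poll sequence above is the generic way these CPUs
 * are released from halt.  A minimal sketch of it as a stand-alone
 * helper follows; tg3_start_cpu is a hypothetical name, not a
 * function this driver defines, so it is kept under #if 0 like the
 * all-zero data array above.
 */
#if 0
/* tp->lock is held. */
static int tg3_start_cpu(struct tg3 *tp, u32 cpu_base, u32 pc)
{
        int i;

        /* Point the halted CPU at the firmware entry point. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, pc);

        /* Retry a few times in case the CPU does not latch the PC. */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == pc)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, pc);
                udelay(1000);
        }
        if (i >= 5)
                return -ENODEV;

        /* Drop the halt bit; the CPU starts running from pc. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE, 0x00000000);
        return 0;
}
#endif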


#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELEASE_MINOR        0x6
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA