[TG3]: Add 5761 APE support
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.82"
68 #define DRV_MODULE_RELDATE      "October 5, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver claims; the zero entry terminates
 * the table for the PCI core's matching code.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};
212
213 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
214
/* Statistic names reported for ETHTOOL_GSTATS.  TG3_NUM_STATS is derived
 * from struct tg3_ethtool_stats, so the ordering here presumably mirrors
 * that structure's u64 fields -- confirm against tg3.h before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
295
/* Names of the TG3_NUM_TEST ethtool self-tests, in execution order;
 * "(online)" tests run without taking the link down.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
306
307 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
308 {
309         writel(val, tp->regs + off);
310 }
311
312 static u32 tg3_read32(struct tg3 *tp, u32 off)
313 {
314         return (readl(tp->regs + off));
315 }
316
317 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
318 {
319         writel(val, tp->aperegs + off);
320 }
321
322 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
323 {
324         return (readl(tp->aperegs + off));
325 }
326
/* Write a chip register through the PCI config-space address/data
 * window instead of MMIO.  indirect_lock serializes users of the
 * shared REG_BASE_ADDR/REG_DATA pair.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
336
/* MMIO register write followed by a read-back of the same register,
 * forcing the posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
342
/* Read a chip register through the PCI config-space address/data
 * window; indirect_lock serializes use of the shared register pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
354
/* Write a mailbox register via PCI config space.  The RX return and
 * standard producer mailboxes have dedicated config-space aliases and
 * bypass the indirect lock; all other mailboxes go through the shared
 * address/data window (mailboxes sit at +0x5600 in that space --
 * NOTE(review): offset taken from this code, confirm against the chip
 * register map).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
384
/* Read a mailbox register through the PCI config-space window
 * (mailboxes sit at +0x5600 in indirect register space, matching
 * tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
396
397 /* usec_wait specifies the wait time in usec when writing to certain registers
398  * where it is unsafe to read back the register without some delay.
399  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
400  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
401  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		/* Read back to force the posted write out to the chip. */
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
421
/* Write a mailbox register and read it back to flush the posted write,
 * except on chips where the read-back is unsafe (mailbox write-reorder
 * or ICH workarounds in effect).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
429
/* Ring a TX doorbell mailbox.  Chips with the TXD mailbox hardware bug
 * need the value written twice; chips subject to write reordering need
 * a read-back so the doorbell is not deferred behind other writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
439
440 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
441 {
442         return (readl(tp->regs + off + GRCMBOX_BASE));
443 }
444
445 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
446 {
447         writel(val, tp->regs + off + GRCMBOX_BASE);
448 }
449
450 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
451 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
452 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
453 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
454 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
455
456 #define tw32(reg,val)           tp->write32(tp, reg, val)
457 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
458 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
459 #define tr32(reg)               tp->read32(tp, reg)
460
/* Write one word into NIC on-board SRAM through the memory window.
 * On the 5906 the statistics-block region is skipped entirely (the
 * write is silently dropped).  The window base is always restored to
 * zero afterwards, and indirect_lock guards the shared window.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window is only reachable via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
485
/* Read one word from NIC on-board SRAM through the memory window.
 * The 5906 statistics-block region cannot be read; *val is forced to
 * zero there.  Window base is restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window is only reachable via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
512
513 static void tg3_ape_lock_init(struct tg3 *tp)
514 {
515         int i;
516
517         /* Make sure the driver hasn't any stale locks. */
518         for (i = 0; i < 8; i++)
519                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
520                                 APE_LOCK_GRANT_DRIVER);
521 }
522
523 static int tg3_ape_lock(struct tg3 *tp, int locknum)
524 {
525         int i, off;
526         int ret = 0;
527         u32 status;
528
529         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
530                 return 0;
531
532         switch (locknum) {
533                 case TG3_APE_LOCK_MEM:
534                         break;
535                 default:
536                         return -EINVAL;
537         }
538
539         off = 4 * locknum;
540
541         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
542
543         /* Wait for up to 1 millisecond to acquire lock. */
544         for (i = 0; i < 100; i++) {
545                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
546                 if (status == APE_LOCK_GRANT_DRIVER)
547                         break;
548                 udelay(10);
549         }
550
551         if (status != APE_LOCK_GRANT_DRIVER) {
552                 /* Revoke the lock request. */
553                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
554                                 APE_LOCK_GRANT_DRIVER);
555
556                 ret = -EBUSY;
557         }
558
559         return ret;
560 }
561
562 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
563 {
564         int off;
565
566         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
567                 return;
568
569         switch (locknum) {
570                 case TG3_APE_LOCK_MEM:
571                         break;
572                 default:
573                         return;
574         }
575
576         off = 4 * locknum;
577         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
578 }
579
/* Disable chip interrupts: mask the PCI interrupt line in MISC host
 * control, then write 1 to the interrupt mailbox (the "disable" value,
 * see tg3_write_indirect_mbox) with a flush.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
586
/* Conditionally force an interrupt so pending status-block work is not
 * lost: with non-tagged status and an unprocessed update, assert the
 * GRC SETINT bit; otherwise nudge the host coalescing engine with
 * HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
596
/* Re-enable chip interrupts.  irq_sync is cleared first, with a write
 * barrier so the interrupt handler sees it before the unmask takes
 * effect.  The last processed tag is acked via the interrupt mailbox;
 * 1-shot MSI chips get the mailbox written a second time (presumably
 * to re-arm the one-shot MSI -- confirm against chip docs).  Finally
 * tg3_cond_int() fires any interrupt that may have been missed.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
611
612 static inline unsigned int tg3_has_work(struct tg3 *tp)
613 {
614         struct tg3_hw_status *sblk = tp->hw_status;
615         unsigned int work_exists = 0;
616
617         /* check for phy events */
618         if (!(tp->tg3_flags &
619               (TG3_FLAG_USE_LINKCHG_REG |
620                TG3_FLAG_POLL_SERDES))) {
621                 if (sblk->status & SD_STATUS_LINK_CHG)
622                         work_exists = 1;
623         }
624         /* check for RX/TX work to do */
625         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
626             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
627                 work_exists = 1;
628
629         return work_exists;
630 }
631
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write ahead of any subsequent MMIO. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
652
/* Quiesce the interface: refresh trans_start so the TX watchdog does
 * not fire while the queue is stopped, disable NAPI polling, then
 * stop the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
659
/* Restart the interface after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated, and re-enable chip
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	/* Force tg3_cond_int() (via tg3_enable_ints) to see pending work. */
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
671
/* Switch the chip's core clock source via TG3PCI_CLOCK_CTRL.  No-op on
 * CPMU-equipped or 5780-class parts.  On pre-5705 chips running the
 * 44MHz core clock, the transition goes through an intermediate
 * ALTCLK step (NOTE(review): sequence appears hardware-mandated --
 * confirm against the Tigon3 register docs before reordering).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
703
704 #define PHY_BUSY_LOOPS  5000
705
/* Read PHY register @reg over the MAC's MII management interface.
 * Autopolling is temporarily switched off around the transaction and
 * restored afterwards.  The MI_COM busy bit is polled for up to
 * PHY_BUSY_LOOPS iterations.  Returns 0 with the data in *val, or
 * -EBUSY on timeout (*val is pre-zeroed, so it reads 0 on failure).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle so the data is valid. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
754
/* Write @val to PHY register @reg over the MII management interface.
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (returns 0).  Autopolling is suspended around the
 * transaction; MI_COM busy is polled for up to PHY_BUSY_LOOPS
 * iterations.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
803
/* Enable or disable automatic MDI/MDI-X crossover on the PHY.
 * No-op on pre-5705 chips and on serdes devices.  The 5906 uses its
 * EPHY shadow register set (shadow access enabled around the
 * read-modify-write, then restored); other chips go through the
 * AUXCTL "misc" shadow register with the write-enable bit set.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			/* Restore the original test register (shadow off). */
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
841
842 static void tg3_phy_set_wirespeed(struct tg3 *tp)
843 {
844         u32 val;
845
846         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
847                 return;
848
849         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
850             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
851                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
852                              (val | (1 << 15) | (1 << 4)));
853 }
854
855 static int tg3_bmcr_reset(struct tg3 *tp)
856 {
857         u32 phy_control;
858         int limit, err;
859
860         /* OK, reset it, and poll the BMCR_RESET bit until it
861          * clears or we time out.
862          */
863         phy_control = BMCR_RESET;
864         err = tg3_writephy(tp, MII_BMCR, phy_control);
865         if (err != 0)
866                 return -EBUSY;
867
868         limit = 5000;
869         while (limit--) {
870                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
871                 if (err != 0)
872                         return -EBUSY;
873
874                 if ((phy_control & BMCR_RESET) == 0) {
875                         udelay(40);
876                         break;
877                 }
878                 udelay(10);
879         }
880         if (limit <= 0)
881                 return -EBUSY;
882
883         return 0;
884 }
885
886 static int tg3_wait_macro_done(struct tg3 *tp)
887 {
888         int limit = 100;
889
890         while (limit--) {
891                 u32 tmp32;
892
893                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
894                         if ((tmp32 & 0x1000) == 0)
895                                 break;
896                 }
897         }
898         if (limit <= 0)
899                 return -EBUSY;
900
901         return 0;
902 }
903
/* Write a fixed test pattern into the DSP TAP registers of all four
 * channels and read it back through the PHY's macro engine, verifying
 * each value.
 *
 * @tp:     device private state
 * @resetp: out-flag; set to 1 when the macro engine timed out or a
 *          readback failed, telling the caller to reset the PHY before
 *          retrying
 *
 * Returns 0 when every channel reads back intact, -EBUSY otherwise.
 * Register 0x16 is the macro-engine control/status register (its busy
 * bit is what tg3_wait_macro_done() polls); the specific command
 * values written to it are undocumented vendor magic.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* One six-word pattern per DSP channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's TAP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the write macro and wait for it to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the block and start the readback macros. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Entries pair up: even index = low word (15 bits kept),
		 * odd index = high word (low 4 bits kept).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Miscompare: issue the vendor recovery
				 * writes, then fail without requesting a
				 * full PHY reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
969
970 static int tg3_phy_reset_chanpat(struct tg3 *tp)
971 {
972         int chan;
973
974         for (chan = 0; chan < 4; chan++) {
975                 int i;
976
977                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
978                              (chan * 0x2000) | 0x0200);
979                 tg3_writephy(tp, 0x16, 0x0002);
980                 for (i = 0; i < 6; i++)
981                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
982                 tg3_writephy(tp, 0x16, 0x0202);
983                 if (tg3_wait_macro_done(tp))
984                         return -EBUSY;
985         }
986
987         return 0;
988 }
989
990 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
991 {
992         u32 reg32, phy9_orig;
993         int retries, do_phy_reset, err;
994
995         retries = 10;
996         do_phy_reset = 1;
997         do {
998                 if (do_phy_reset) {
999                         err = tg3_bmcr_reset(tp);
1000                         if (err)
1001                                 return err;
1002                         do_phy_reset = 0;
1003                 }
1004
1005                 /* Disable transmitter and interrupt.  */
1006                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1007                         continue;
1008
1009                 reg32 |= 0x3000;
1010                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1011
1012                 /* Set full-duplex, 1000 mbps.  */
1013                 tg3_writephy(tp, MII_BMCR,
1014                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1015
1016                 /* Set to master mode.  */
1017                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1018                         continue;
1019
1020                 tg3_writephy(tp, MII_TG3_CTRL,
1021                              (MII_TG3_CTRL_AS_MASTER |
1022                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1023
1024                 /* Enable SM_DSP_CLOCK and 6dB.  */
1025                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1026
1027                 /* Block the PHY control access.  */
1028                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1029                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1030
1031                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1032                 if (!err)
1033                         break;
1034         } while (--retries);
1035
1036         err = tg3_phy_reset_chanpat(tp);
1037         if (err)
1038                 return err;
1039
1040         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1041         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1042
1043         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1044         tg3_writephy(tp, 0x16, 0x0000);
1045
1046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1048                 /* Set Extended packet length bit for jumbo frames */
1049                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1050         }
1051         else {
1052                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1053         }
1054
1055         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1056
1057         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1058                 reg32 &= ~0x3000;
1059                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1060         } else if (!err)
1061                 err = -EBUSY;
1062
1063         return err;
1064 }
1065
1066 static void tg3_link_report(struct tg3 *);
1067
/* Reset the tigon3 PHY and re-apply all per-chip erratum workarounds.
 * (The old comment mentioned a FORCE argument; the function takes none
 * and always performs the reset.)
 *
 * Returns 0 on success or a negative errno if the reset, or an MDIO
 * access it depends on, fails.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 EPHY out of IDDQ (low-power) mode first
		 * so it responds to MDIO again.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice: status bits are latched, so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it going down first. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight reset-and-verify path. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* The DSP address/data pairs below are vendor-provided magic
	 * sequences for specific PHY errata; values are opaque.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice deliberately — presumably per the 5704 A0
		 * erratum; TODO confirm against vendor documentation.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	/* Re-enable auto-MDIX and ethernet@wirespeed after the reset. */
	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1174
/* Configure the GRC local-control GPIOs that switch the board's
 * auxiliary (Vaux) power circuitry.  On dual-port 5704/5714 boards the
 * two PCI functions share these GPIOs, so the peer device's WoL/ASF
 * state is consulted and only one function drives the switch.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only boards flagged IS_NIC have the aux power GPIOs wired. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Either function wanting WoL or ASF keeps aux power enabled. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			/* 5700/5701: single write asserts everything. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer that completed init own the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Staged sequence: assert outputs 1/2, then add
			 * output 0, then release output 2 (when usable).
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Nobody needs aux power: pulse GPIO1 to switch
			 * the board off of Vaux.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1270
1271 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1272 {
1273         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1274                 return 1;
1275         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1276                 if (speed != SPEED_10)
1277                         return 1;
1278         } else if (speed == SPEED_10)
1279                 return 1;
1280
1281         return 0;
1282 }
1283
1284 static int tg3_setup_phy(struct tg3 *, int);
1285
1286 #define RESET_KIND_SHUTDOWN     0
1287 #define RESET_KIND_INIT         1
1288 #define RESET_KIND_SUSPEND      2
1289
1290 static void tg3_write_sig_post_reset(struct tg3 *, int);
1291 static int tg3_halt_cpu(struct tg3 *, u32);
1292 static int tg3_nvram_lock(struct tg3 *);
1293 static void tg3_nvram_unlock(struct tg3 *);
1294
1295 static void tg3_power_down_phy(struct tg3 *tp)
1296 {
1297         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1298                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1299                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1300                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1301
1302                         sg_dig_ctrl |=
1303                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1304                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1305                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1306                 }
1307                 return;
1308         }
1309
1310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1311                 u32 val;
1312
1313                 tg3_bmcr_reset(tp);
1314                 val = tr32(GRC_MISC_CFG);
1315                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1316                 udelay(40);
1317                 return;
1318         } else {
1319                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1320                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1321                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1322         }
1323
1324         /* The PHY should not be powered down on some chips because
1325          * of bugs.
1326          */
1327         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1328             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1329             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1330              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1331                 return;
1332         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1333 }
1334
1335 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1336 {
1337         u32 misc_host_ctrl;
1338         u16 power_control, power_caps;
1339         int pm = tp->pm_cap;
1340
1341         /* Make sure register accesses (indirect or otherwise)
1342          * will function correctly.
1343          */
1344         pci_write_config_dword(tp->pdev,
1345                                TG3PCI_MISC_HOST_CTRL,
1346                                tp->misc_host_ctrl);
1347
1348         pci_read_config_word(tp->pdev,
1349                              pm + PCI_PM_CTRL,
1350                              &power_control);
1351         power_control |= PCI_PM_CTRL_PME_STATUS;
1352         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1353         switch (state) {
1354         case PCI_D0:
1355                 power_control |= 0;
1356                 pci_write_config_word(tp->pdev,
1357                                       pm + PCI_PM_CTRL,
1358                                       power_control);
1359                 udelay(100);    /* Delay after power state change */
1360
1361                 /* Switch out of Vaux if it is a NIC */
1362                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1363                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1364
1365                 return 0;
1366
1367         case PCI_D1:
1368                 power_control |= 1;
1369                 break;
1370
1371         case PCI_D2:
1372                 power_control |= 2;
1373                 break;
1374
1375         case PCI_D3hot:
1376                 power_control |= 3;
1377                 break;
1378
1379         default:
1380                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1381                        "requested.\n",
1382                        tp->dev->name, state);
1383                 return -EINVAL;
1384         };
1385
1386         power_control |= PCI_PM_CTRL_PME_ENABLE;
1387
1388         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1389         tw32(TG3PCI_MISC_HOST_CTRL,
1390              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1391
1392         if (tp->link_config.phy_is_low_power == 0) {
1393                 tp->link_config.phy_is_low_power = 1;
1394                 tp->link_config.orig_speed = tp->link_config.speed;
1395                 tp->link_config.orig_duplex = tp->link_config.duplex;
1396                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1397         }
1398
1399         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1400                 tp->link_config.speed = SPEED_10;
1401                 tp->link_config.duplex = DUPLEX_HALF;
1402                 tp->link_config.autoneg = AUTONEG_ENABLE;
1403                 tg3_setup_phy(tp, 0);
1404         }
1405
1406         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1407                 u32 val;
1408
1409                 val = tr32(GRC_VCPU_EXT_CTRL);
1410                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1411         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1412                 int i;
1413                 u32 val;
1414
1415                 for (i = 0; i < 200; i++) {
1416                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1417                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1418                                 break;
1419                         msleep(1);
1420                 }
1421         }
1422         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1423                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1424                                                      WOL_DRV_STATE_SHUTDOWN |
1425                                                      WOL_DRV_WOL |
1426                                                      WOL_SET_MAGIC_PKT);
1427
1428         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1429
1430         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1431                 u32 mac_mode;
1432
1433                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1434                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1435                         udelay(40);
1436
1437                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1438                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1439                         else
1440                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1441
1442                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1443                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1444                             ASIC_REV_5700) {
1445                                 u32 speed = (tp->tg3_flags &
1446                                              TG3_FLAG_WOL_SPEED_100MB) ?
1447                                              SPEED_100 : SPEED_10;
1448                                 if (tg3_5700_link_polarity(tp, speed))
1449                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1450                                 else
1451                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1452                         }
1453                 } else {
1454                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1455                 }
1456
1457                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1458                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1459
1460                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1461                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1462                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1463
1464                 tw32_f(MAC_MODE, mac_mode);
1465                 udelay(100);
1466
1467                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1468                 udelay(10);
1469         }
1470
1471         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1472             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1473              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1474                 u32 base_val;
1475
1476                 base_val = tp->pci_clock_ctrl;
1477                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1478                              CLOCK_CTRL_TXCLK_DISABLE);
1479
1480                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1481                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1482         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1483                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1484                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1485                 /* do nothing */
1486         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1487                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1488                 u32 newbits1, newbits2;
1489
1490                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1491                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1492                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1493                                     CLOCK_CTRL_TXCLK_DISABLE |
1494                                     CLOCK_CTRL_ALTCLK);
1495                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1496                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1497                         newbits1 = CLOCK_CTRL_625_CORE;
1498                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1499                 } else {
1500                         newbits1 = CLOCK_CTRL_ALTCLK;
1501                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1502                 }
1503
1504                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1505                             40);
1506
1507                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1508                             40);
1509
1510                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1511                         u32 newbits3;
1512
1513                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1514                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1515                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1516                                             CLOCK_CTRL_TXCLK_DISABLE |
1517                                             CLOCK_CTRL_44MHZ_CORE);
1518                         } else {
1519                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1520                         }
1521
1522                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1523                                     tp->pci_clock_ctrl | newbits3, 40);
1524                 }
1525         }
1526
1527         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1528             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1529             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1530                 tg3_power_down_phy(tp);
1531
1532         tg3_frob_aux_power(tp);
1533
1534         /* Workaround for unstable PLL clock */
1535         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1536             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1537                 u32 val = tr32(0x7d00);
1538
1539                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1540                 tw32(0x7d00, val);
1541                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1542                         int err;
1543
1544                         err = tg3_nvram_lock(tp);
1545                         tg3_halt_cpu(tp, RX_CPU_BASE);
1546                         if (!err)
1547                                 tg3_nvram_unlock(tp);
1548                 }
1549         }
1550
1551         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1552
1553         /* Finally, set the new power state. */
1554         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1555         udelay(100);    /* Delay after power state change */
1556
1557         return 0;
1558 }
1559
1560 static void tg3_link_report(struct tg3 *tp)
1561 {
1562         if (!netif_carrier_ok(tp->dev)) {
1563                 if (netif_msg_link(tp))
1564                         printk(KERN_INFO PFX "%s: Link is down.\n",
1565                                tp->dev->name);
1566         } else if (netif_msg_link(tp)) {
1567                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1568                        tp->dev->name,
1569                        (tp->link_config.active_speed == SPEED_1000 ?
1570                         1000 :
1571                         (tp->link_config.active_speed == SPEED_100 ?
1572                          100 : 10)),
1573                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1574                         "full" : "half"));
1575
1576                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1577                        "%s for RX.\n",
1578                        tp->dev->name,
1579                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1580                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1581         }
1582 }
1583
1584 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1585 {
1586         u32 new_tg3_flags = 0;
1587         u32 old_rx_mode = tp->rx_mode;
1588         u32 old_tx_mode = tp->tx_mode;
1589
1590         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1591
1592                 /* Convert 1000BaseX flow control bits to 1000BaseT
1593                  * bits before resolving flow control.
1594                  */
1595                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1596                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1597                                        ADVERTISE_PAUSE_ASYM);
1598                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1599
1600                         if (local_adv & ADVERTISE_1000XPAUSE)
1601                                 local_adv |= ADVERTISE_PAUSE_CAP;
1602                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1603                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1604                         if (remote_adv & LPA_1000XPAUSE)
1605                                 remote_adv |= LPA_PAUSE_CAP;
1606                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1607                                 remote_adv |= LPA_PAUSE_ASYM;
1608                 }
1609
1610                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1611                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1612                                 if (remote_adv & LPA_PAUSE_CAP)
1613                                         new_tg3_flags |=
1614                                                 (TG3_FLAG_RX_PAUSE |
1615                                                 TG3_FLAG_TX_PAUSE);
1616                                 else if (remote_adv & LPA_PAUSE_ASYM)
1617                                         new_tg3_flags |=
1618                                                 (TG3_FLAG_RX_PAUSE);
1619                         } else {
1620                                 if (remote_adv & LPA_PAUSE_CAP)
1621                                         new_tg3_flags |=
1622                                                 (TG3_FLAG_RX_PAUSE |
1623                                                 TG3_FLAG_TX_PAUSE);
1624                         }
1625                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1626                         if ((remote_adv & LPA_PAUSE_CAP) &&
1627                         (remote_adv & LPA_PAUSE_ASYM))
1628                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1629                 }
1630
1631                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1632                 tp->tg3_flags |= new_tg3_flags;
1633         } else {
1634                 new_tg3_flags = tp->tg3_flags;
1635         }
1636
1637         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1638                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1639         else
1640                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1641
1642         if (old_rx_mode != tp->rx_mode) {
1643                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1644         }
1645
1646         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1647                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1648         else
1649                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1650
1651         if (old_tx_mode != tp->tx_mode) {
1652                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1653         }
1654 }
1655
1656 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1657 {
1658         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1659         case MII_TG3_AUX_STAT_10HALF:
1660                 *speed = SPEED_10;
1661                 *duplex = DUPLEX_HALF;
1662                 break;
1663
1664         case MII_TG3_AUX_STAT_10FULL:
1665                 *speed = SPEED_10;
1666                 *duplex = DUPLEX_FULL;
1667                 break;
1668
1669         case MII_TG3_AUX_STAT_100HALF:
1670                 *speed = SPEED_100;
1671                 *duplex = DUPLEX_HALF;
1672                 break;
1673
1674         case MII_TG3_AUX_STAT_100FULL:
1675                 *speed = SPEED_100;
1676                 *duplex = DUPLEX_FULL;
1677                 break;
1678
1679         case MII_TG3_AUX_STAT_1000HALF:
1680                 *speed = SPEED_1000;
1681                 *duplex = DUPLEX_HALF;
1682                 break;
1683
1684         case MII_TG3_AUX_STAT_1000FULL:
1685                 *speed = SPEED_1000;
1686                 *duplex = DUPLEX_FULL;
1687                 break;
1688
1689         default:
1690                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1691                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1692                                  SPEED_10;
1693                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1694                                   DUPLEX_HALF;
1695                         break;
1696                 }
1697                 *speed = SPEED_INVALID;
1698                 *duplex = DUPLEX_INVALID;
1699                 break;
1700         };
1701 }
1702
/* Program the copper PHY advertisement/control registers and kick off
 * (or force) link negotiation, based on tp->link_config.
 *
 * Three configurations are handled:
 *   - low-power mode: advertise 10Mb only (plus 100Mb when WoL at
 *     100Mb is required) and clear all gigabit advertisements;
 *   - autonegotiation (speed == SPEED_INVALID): advertise every mode
 *     allowed by link_config.advertising;
 *   - forced mode: advertise only the requested speed/duplex and then
 *     write BMCR directly instead of restarting autoneg.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Autonegotiate: build the 10/100 advertisement word
		 * from link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 additionally request the master
			 * role (presumably a silicon workaround; the
			 * forced-1000 path below does the same).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* Forced 10/100: clear gigabit advertisement
			 * and advertise only the requested mode.
			 */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* BMCR must change: drop the link by putting
			 * the PHY in loopback, wait up to ~15ms for
			 * link-down, then write the new settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice because the
				 * link-status bit is latched.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autonegotiating: (re)start negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1835
1836 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1837 {
1838         int err;
1839
1840         /* Turn off tap power management. */
1841         /* Set Extended packet length bit */
1842         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1843
1844         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1845         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1846
1847         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1848         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1849
1850         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1851         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1852
1853         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1854         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1855
1856         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1857         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1858
1859         udelay(40);
1860
1861         return err;
1862 }
1863
1864 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1865 {
1866         u32 adv_reg, all_mask = 0;
1867
1868         if (mask & ADVERTISED_10baseT_Half)
1869                 all_mask |= ADVERTISE_10HALF;
1870         if (mask & ADVERTISED_10baseT_Full)
1871                 all_mask |= ADVERTISE_10FULL;
1872         if (mask & ADVERTISED_100baseT_Half)
1873                 all_mask |= ADVERTISE_100HALF;
1874         if (mask & ADVERTISED_100baseT_Full)
1875                 all_mask |= ADVERTISE_100FULL;
1876
1877         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1878                 return 0;
1879
1880         if ((adv_reg & all_mask) != all_mask)
1881                 return 0;
1882         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1883                 u32 tg3_ctrl;
1884
1885                 all_mask = 0;
1886                 if (mask & ADVERTISED_1000baseT_Half)
1887                         all_mask |= ADVERTISE_1000HALF;
1888                 if (mask & ADVERTISED_1000baseT_Full)
1889                         all_mask |= ADVERTISE_1000FULL;
1890
1891                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1892                         return 0;
1893
1894                 if ((tg3_ctrl & all_mask) != all_mask)
1895                         return 0;
1896         }
1897         return 1;
1898 }
1899
/* Bring up / re-evaluate the link on a copper PHY and program the MAC
 * to match (port mode, duplex, flow control, link polarity).  If the
 * link is not up (or the PHY is in low power), negotiation is
 * (re)started via tg3_phy_copper_begin().  Always returns 0 except on
 * 5401 DSP init failure.
 *
 * @force_reset: reset the PHY before evaluating link state.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any stale link/config-change MAC status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is read twice: the link-status bit is latched,
		 * so the second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* No link: (re)load the 5401 DSP fixups and
			 * give the link up to ~10ms to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit: if the link still has
			 * not returned, reset the PHY and redo the
			 * DSP setup once more.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only link-change when using MI interrupts; otherwise
	 * mask everything (the 5906 is left untouched).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Pick the LED mode on 5700/5701. */
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL shadow 0x4007 is set; if
		 * it was not, set it and go renegotiate.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link, up to ~4ms (double-read for the latched
	 * link-status bit).
	 */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status word, then decode
		 * the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry until BMCR reads back something other than 0
		 * or 0x7fff (apparently treated as transient/invalid
		 * reads -- TODO confirm).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link counts only if autoneg is
			 * off and the PHY matches the requested
			 * speed/duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* Resolve flow control from the advertisement registers when
	 * the link is up full-duplex via autonegotiation.
	 */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* (Re)start negotiation, then re-check link state. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode and duplex to match the PHY. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: ack status
	 * changes and post a magic value to the firmware mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate the final link state to the net core and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2176
/* Software state for the fiber (1000BaseX) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks: cur_time advances once
	 * per smachine invocation; link_time records state entry.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times in a row the
	 * same value was seen (used to detect a stable ability match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the incoming config stream. */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received autoneg config words (ANEG_CFG_*). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2234 #define ANEG_OK         0
2235 #define ANEG_DONE       1
2236 #define ANEG_TIMER_ENAB 2
2237 #define ANEG_FAILED     -1
2238
2239 #define ANEG_STATE_SETTLE_TIME  10000
2240
2241 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2242                                    struct tg3_fiber_aneginfo *ap)
2243 {
2244         unsigned long delta;
2245         u32 rx_cfg_reg;
2246         int ret;
2247
2248         if (ap->state == ANEG_STATE_UNKNOWN) {
2249                 ap->rxconfig = 0;
2250                 ap->link_time = 0;
2251                 ap->cur_time = 0;
2252                 ap->ability_match_cfg = 0;
2253                 ap->ability_match_count = 0;
2254                 ap->ability_match = 0;
2255                 ap->idle_match = 0;
2256                 ap->ack_match = 0;
2257         }
2258         ap->cur_time++;
2259
2260         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2261                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2262
2263                 if (rx_cfg_reg != ap->ability_match_cfg) {
2264                         ap->ability_match_cfg = rx_cfg_reg;
2265                         ap->ability_match = 0;
2266                         ap->ability_match_count = 0;
2267                 } else {
2268                         if (++ap->ability_match_count > 1) {
2269                                 ap->ability_match = 1;
2270                                 ap->ability_match_cfg = rx_cfg_reg;
2271                         }
2272                 }
2273                 if (rx_cfg_reg & ANEG_CFG_ACK)
2274                         ap->ack_match = 1;
2275                 else
2276                         ap->ack_match = 0;
2277
2278                 ap->idle_match = 0;
2279         } else {
2280                 ap->idle_match = 1;
2281                 ap->ability_match_cfg = 0;
2282                 ap->ability_match_count = 0;
2283                 ap->ability_match = 0;
2284                 ap->ack_match = 0;
2285
2286                 rx_cfg_reg = 0;
2287         }
2288
2289         ap->rxconfig = rx_cfg_reg;
2290         ret = ANEG_OK;
2291
2292         switch(ap->state) {
2293         case ANEG_STATE_UNKNOWN:
2294                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2295                         ap->state = ANEG_STATE_AN_ENABLE;
2296
2297                 /* fallthru */
2298         case ANEG_STATE_AN_ENABLE:
2299                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2300                 if (ap->flags & MR_AN_ENABLE) {
2301                         ap->link_time = 0;
2302                         ap->cur_time = 0;
2303                         ap->ability_match_cfg = 0;
2304                         ap->ability_match_count = 0;
2305                         ap->ability_match = 0;
2306                         ap->idle_match = 0;
2307                         ap->ack_match = 0;
2308
2309                         ap->state = ANEG_STATE_RESTART_INIT;
2310                 } else {
2311                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2312                 }
2313                 break;
2314
2315         case ANEG_STATE_RESTART_INIT:
2316                 ap->link_time = ap->cur_time;
2317                 ap->flags &= ~(MR_NP_LOADED);
2318                 ap->txconfig = 0;
2319                 tw32(MAC_TX_AUTO_NEG, 0);
2320                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2321                 tw32_f(MAC_MODE, tp->mac_mode);
2322                 udelay(40);
2323
2324                 ret = ANEG_TIMER_ENAB;
2325                 ap->state = ANEG_STATE_RESTART;
2326
2327                 /* fallthru */
2328         case ANEG_STATE_RESTART:
2329                 delta = ap->cur_time - ap->link_time;
2330                 if (delta > ANEG_STATE_SETTLE_TIME) {
2331                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2332                 } else {
2333                         ret = ANEG_TIMER_ENAB;
2334                 }
2335                 break;
2336
2337         case ANEG_STATE_DISABLE_LINK_OK:
2338                 ret = ANEG_DONE;
2339                 break;
2340
2341         case ANEG_STATE_ABILITY_DETECT_INIT:
2342                 ap->flags &= ~(MR_TOGGLE_TX);
2343                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2344                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2345                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2346                 tw32_f(MAC_MODE, tp->mac_mode);
2347                 udelay(40);
2348
2349                 ap->state = ANEG_STATE_ABILITY_DETECT;
2350                 break;
2351
2352         case ANEG_STATE_ABILITY_DETECT:
2353                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2354                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2355                 }
2356                 break;
2357
2358         case ANEG_STATE_ACK_DETECT_INIT:
2359                 ap->txconfig |= ANEG_CFG_ACK;
2360                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2361                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2362                 tw32_f(MAC_MODE, tp->mac_mode);
2363                 udelay(40);
2364
2365                 ap->state = ANEG_STATE_ACK_DETECT;
2366
2367                 /* fallthru */
2368         case ANEG_STATE_ACK_DETECT:
2369                 if (ap->ack_match != 0) {
2370                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2371                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2372                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2373                         } else {
2374                                 ap->state = ANEG_STATE_AN_ENABLE;
2375                         }
2376                 } else if (ap->ability_match != 0 &&
2377                            ap->rxconfig == 0) {
2378                         ap->state = ANEG_STATE_AN_ENABLE;
2379                 }
2380                 break;
2381
2382         case ANEG_STATE_COMPLETE_ACK_INIT:
2383                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2384                         ret = ANEG_FAILED;
2385                         break;
2386                 }
2387                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2388                                MR_LP_ADV_HALF_DUPLEX |
2389                                MR_LP_ADV_SYM_PAUSE |
2390                                MR_LP_ADV_ASYM_PAUSE |
2391                                MR_LP_ADV_REMOTE_FAULT1 |
2392                                MR_LP_ADV_REMOTE_FAULT2 |
2393                                MR_LP_ADV_NEXT_PAGE |
2394                                MR_TOGGLE_RX |
2395                                MR_NP_RX);
2396                 if (ap->rxconfig & ANEG_CFG_FD)
2397                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2398                 if (ap->rxconfig & ANEG_CFG_HD)
2399                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2400                 if (ap->rxconfig & ANEG_CFG_PS1)
2401                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2402                 if (ap->rxconfig & ANEG_CFG_PS2)
2403                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2404                 if (ap->rxconfig & ANEG_CFG_RF1)
2405                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2406                 if (ap->rxconfig & ANEG_CFG_RF2)
2407                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2408                 if (ap->rxconfig & ANEG_CFG_NP)
2409                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2410
2411                 ap->link_time = ap->cur_time;
2412
2413                 ap->flags ^= (MR_TOGGLE_TX);
2414                 if (ap->rxconfig & 0x0008)
2415                         ap->flags |= MR_TOGGLE_RX;
2416                 if (ap->rxconfig & ANEG_CFG_NP)
2417                         ap->flags |= MR_NP_RX;
2418                 ap->flags |= MR_PAGE_RX;
2419
2420                 ap->state = ANEG_STATE_COMPLETE_ACK;
2421                 ret = ANEG_TIMER_ENAB;
2422                 break;
2423
2424         case ANEG_STATE_COMPLETE_ACK:
2425                 if (ap->ability_match != 0 &&
2426                     ap->rxconfig == 0) {
2427                         ap->state = ANEG_STATE_AN_ENABLE;
2428                         break;
2429                 }
2430                 delta = ap->cur_time - ap->link_time;
2431                 if (delta > ANEG_STATE_SETTLE_TIME) {
2432                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2433                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2434                         } else {
2435                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2436                                     !(ap->flags & MR_NP_RX)) {
2437                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2438                                 } else {
2439                                         ret = ANEG_FAILED;
2440                                 }
2441                         }
2442                 }
2443                 break;
2444
2445         case ANEG_STATE_IDLE_DETECT_INIT:
2446                 ap->link_time = ap->cur_time;
2447                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2448                 tw32_f(MAC_MODE, tp->mac_mode);
2449                 udelay(40);
2450
2451                 ap->state = ANEG_STATE_IDLE_DETECT;
2452                 ret = ANEG_TIMER_ENAB;
2453                 break;
2454
2455         case ANEG_STATE_IDLE_DETECT:
2456                 if (ap->ability_match != 0 &&
2457                     ap->rxconfig == 0) {
2458                         ap->state = ANEG_STATE_AN_ENABLE;
2459                         break;
2460                 }
2461                 delta = ap->cur_time - ap->link_time;
2462                 if (delta > ANEG_STATE_SETTLE_TIME) {
2463                         /* XXX another gem from the Broadcom driver :( */
2464                         ap->state = ANEG_STATE_LINK_OK;
2465                 }
2466                 break;
2467
2468         case ANEG_STATE_LINK_OK:
2469                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2470                 ret = ANEG_DONE;
2471                 break;
2472
2473         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2474                 /* ??? unimplemented */
2475                 break;
2476
2477         case ANEG_STATE_NEXT_PAGE_WAIT:
2478                 /* ??? unimplemented */
2479                 break;
2480
2481         default:
2482                 ret = ANEG_FAILED;
2483                 break;
2484         };
2485
2486         return ret;
2487 }
2488
/* Run the software 1000BASE-X autonegotiation state machine
 * (tg3_fiber_aneg_smachine) to completion for parts without the
 * SG_DIG hardware autoneg block.  On return, *flags holds the MR_*
 * result flags (link-partner abilities, completion status).
 *
 * Returns 1 if negotiation finished (ANEG_DONE) with any of the
 * complete/link-ok/LP-full-duplex flags set, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *flags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear the transmitted config word, force the port into GMII
	 * mode, then start sending config code words before cranking
	 * the state machine.
	 */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* One udelay(1) per tick: bounded busy-wait of roughly 195ms. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Negotiation over; stop sending config code words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*flags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2532
/* Initialization sequence for the external BCM8002 SerDes PHY:
 * reset, PLL/comdet setup and a POR pulse.  The register numbers
 * and values below are vendor magic from Broadcom; the sequence and
 * delays are order-critical and must not be rearranged.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2582
/* Drive the on-chip SG_DIG hardware autonegotiation block for fiber
 * links.  Handles: the 5704 (non-A0/A1) dual-MAC SerDes config
 * workaround, forced mode when autoneg is disabled, (re)starting
 * hardware autoneg, harvesting autoneg results into flow-control
 * settings, and falling back to parallel detection when the link
 * partner does not negotiate.
 *
 * Returns nonzero if the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		/* DUAL_MAC_CTRL_ID set means this is the second port. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode.  Bit 31 presumably is the SG_DIG autoneg
		 * enable bit - TODO confirm against Broadcom docs; if hw
		 * autoneg is currently on, turn it off and force the link.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Per-port magic SerDes config values. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			/* Magic forced-mode SG_DIG value (autoneg off). */
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While a previous parallel-detect attempt is still timing
		 * out, keep the link up as long as we have PCS sync and no
		 * incoming config code words.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse bit 30 (presumably autoneg restart - TODO confirm)
		 * then program the desired autoneg config.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* sg_dig_status bit 1 looks like "autoneg complete";
		 * bits 19/20 mirror the LP pause ability bits - TODO
		 * confirm against Broadcom docs.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete yet: run down the timeout,
			 * then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2719
/* Software fallback for fiber link setup on parts without the SG_DIG
 * hardware autoneg block.  With autoneg enabled it runs the software
 * 1000BASE-X state machine via fiber_autoneg(); otherwise it forces a
 * 1000FD link.
 *
 * Returns nonzero if the link should be considered up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no link, full stop. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the MR_* result flags into MII-style
			 * pause advertisement bits for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config-changed attention bits until they
		 * stay clear (bounded to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no incoming config
		 * code words: treat as link up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then stop. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2776
/* Link setup entry point for TBI (fiber) interfaces.  Short-circuits
 * when a software-autoneg link is already up and clean, otherwise
 * programs the MAC for TBI mode, runs hardware (SG_DIG) or software
 * autoneg, then updates link speed/duplex, LED control and carrier
 * state, reporting link changes.  Always returns 0; force_reset is
 * unused here (kept for signature parity with the other setup
 * routines).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can report only real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done and
	 * the MAC reports a clean, synced link - just ack attentions.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark status block updated, clearing any stale link-change bit. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state attentions until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * provoke the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the link LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report if pause/speed/duplex
		 * settings moved.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2888
/* Link setup for SerDes parts driven through an MII-like register
 * interface (e.g. 5714S/5780 class).  Programs 1000BASE-X autoneg
 * advertisement or forces speed/duplex, reads back link state, sets
 * up flow control from the negotiated abilities, and updates MAC
 * mode and carrier state.
 *
 * Returns the OR of the tg3_readphy() error codes accumulated along
 * the way (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 the PHY's link bit is unreliable; trust the MAC's TX
	 * status instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the desired 1000BASE-X advertisement word,
		 * clearing all ability bits before re-adding.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg only if the advertisement changed or
		 * autoneg is not currently enabled; otherwise fall through
		 * to the link check below.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: only duplex is selectable here. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw all abilities and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched BMSR: read twice, then apply the 5714
			 * TX-status override again.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Duplex and flow control come from the abilities
			 * both sides advertised; no common 1000X ability
			 * means the "link" is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the *previous* active_duplex; the
	 * freshly determined current_duplex is only stored below.  The
	 * new value takes effect on the next pass - presumably intended,
	 * but worth confirming.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3055
/* Poll-time helper for serdes links: if autoneg has not brought the link
 * up, check the PHY's signal-detect and rx-config status and bring the
 * link up by "parallel detection" (force 1000/full, autoneg off).  If a
 * parallel-detected link later starts receiving config code words, drop
 * back to autoneg.  tp->serdes_counter delays the check so autoneg gets
 * a chance to finish first.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice: presumably the first read returns the
			 * latched value and the second the current state --
			 * TODO confirm against the PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3113
/* (Re)configure the PHY according to its type (fiber serdes, MII serdes,
 * or copper), then program MAC transmit timings and housekeeping that
 * depend on the negotiated link state.  Returns 0 or a negative errno
 * from the PHY setup helper.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000/half uses a longer slot time (0xff) than all other
	 * speed/duplex combinations (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Pre-5705 chips: coalesce statistics only while the link is up. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: program the PCIe L1 power-management threshold
	 * to tp->pwrmgmt_thresh while the link is down, and to the full
	 * mask while it is up.
	 */
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3159
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path must not be taken once the workaround is already in
	 * effect (flag set or indirect mailbox writes in use).
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* tg3_reset_task() picks this flag up and applies the
	 * write-reorder workaround during the chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3180
3181 static inline u32 tg3_tx_avail(struct tg3 *tp)
3182 {
3183         smp_mb();
3184         return (tp->tx_pending -
3185                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3186 }
3187
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Walk from the software consumer index to the hardware one,
	 * unmapping and freeing each completed skb (head + fragments).
	 */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb means the completion index disagrees with the
		 * ring contents -- treat it as the MMIO reorder symptom.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Fragment entries follow the head entry; only the head
		 * entry carries the skb pointer.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check availability under netif_tx_lock to close the race
	 * with a concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3255
3256 /* Returns size of skb allocated or < 0 on error.
3257  *
3258  * We only need to fill in the address because the other members
3259  * of the RX descriptor are invariant, see tg3_init_rings.
3260  *
3261  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3262  * posting buffers we only dirty the first cache line of the RX
3263  * descriptor (containing the address).  Whereas for the RX status
3264  * buffers the cpu only reads the last cacheline of the RX descriptor
3265  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3266  */
3267 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3268                             int src_idx, u32 dest_idx_unmasked)
3269 {
3270         struct tg3_rx_buffer_desc *desc;
3271         struct ring_info *map, *src_map;
3272         struct sk_buff *skb;
3273         dma_addr_t mapping;
3274         int skb_size, dest_idx;
3275
3276         src_map = NULL;
3277         switch (opaque_key) {
3278         case RXD_OPAQUE_RING_STD:
3279                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3280                 desc = &tp->rx_std[dest_idx];
3281                 map = &tp->rx_std_buffers[dest_idx];
3282                 if (src_idx >= 0)
3283                         src_map = &tp->rx_std_buffers[src_idx];
3284                 skb_size = tp->rx_pkt_buf_sz;
3285                 break;
3286
3287         case RXD_OPAQUE_RING_JUMBO:
3288                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3289                 desc = &tp->rx_jumbo[dest_idx];
3290                 map = &tp->rx_jumbo_buffers[dest_idx];
3291                 if (src_idx >= 0)
3292                         src_map = &tp->rx_jumbo_buffers[src_idx];
3293                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3294                 break;
3295
3296         default:
3297                 return -EINVAL;
3298         };
3299
3300         /* Do not overwrite any of the map or rp information
3301          * until we are sure we can commit to a new buffer.
3302          *
3303          * Callers depend upon this behavior and assume that
3304          * we leave everything unchanged if we fail.
3305          */
3306         skb = netdev_alloc_skb(tp->dev, skb_size);
3307         if (skb == NULL)
3308                 return -ENOMEM;
3309
3310         skb_reserve(skb, tp->rx_offset);
3311
3312         mapping = pci_map_single(tp->pdev, skb->data,
3313                                  skb_size - tp->rx_offset,
3314                                  PCI_DMA_FROMDEVICE);
3315
3316         map->skb = skb;
3317         pci_unmap_addr_set(map, mapping, mapping);
3318
3319         if (src_map != NULL)
3320                 src_map->skb = NULL;
3321
3322         desc->addr_hi = ((u64)mapping >> 32);
3323         desc->addr_lo = ((u64)mapping & 0xffffffff);
3324
3325         return skb_size;
3326 }
3327
3328 /* We only need to move over in the address because the other
3329  * members of the RX descriptor are invariant.  See notes above
3330  * tg3_alloc_rx_skb for full details.
3331  */
3332 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3333                            int src_idx, u32 dest_idx_unmasked)
3334 {
3335         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3336         struct ring_info *src_map, *dest_map;
3337         int dest_idx;
3338
3339         switch (opaque_key) {
3340         case RXD_OPAQUE_RING_STD:
3341                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3342                 dest_desc = &tp->rx_std[dest_idx];
3343                 dest_map = &tp->rx_std_buffers[dest_idx];
3344                 src_desc = &tp->rx_std[src_idx];
3345                 src_map = &tp->rx_std_buffers[src_idx];
3346                 break;
3347
3348         case RXD_OPAQUE_RING_JUMBO:
3349                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3350                 dest_desc = &tp->rx_jumbo[dest_idx];
3351                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3352                 src_desc = &tp->rx_jumbo[src_idx];
3353                 src_map = &tp->rx_jumbo_buffers[src_idx];
3354                 break;
3355
3356         default:
3357                 return;
3358         };
3359
3360         dest_map->skb = src_map->skb;
3361         pci_unmap_addr_set(dest_map, mapping,
3362                            pci_unmap_addr(src_map, mapping));
3363         dest_desc->addr_hi = src_desc->addr_hi;
3364         dest_desc->addr_lo = src_desc->addr_lo;
3365
3366         src_map->skb = NULL;
3367 }
3368
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the VLAN acceleration group. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3375
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Processes up to @budget packets from the status ring; returns the
 * number of packets delivered to the stack.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring (std or
		 * jumbo) and the buffer's index within it.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large packets: give the buffer to the stack and post a
		 * freshly allocated replacement to the ring.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			/* Small packets: copy into a fresh skb and recycle
			 * the original buffer back onto the ring.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip reports a
		 * complete (0xffff) TCP/UDP checksum.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std-ring
		 * buffers so it does not starve during a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes before any subsequent MMIO. */
	mmiowb();

	return received;
}
3555
/* NAPI poll callback: handle link-change events, reap TX completions,
 * receive up to @budget packets, and re-enable interrupts when all work
 * is done.  Returns the number of RX packets processed.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	struct net_device *netdev = tp->dev;
	struct tg3_hw_status *sblk = tp->hw_status;
	int work_done = 0;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* TX recovery pending: stop polling and let the reset
		 * task restart the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			netif_rx_complete(netdev, napi);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done = tg3_rx(tp, budget);

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Record the tag of the status block just serviced; the
		 * rmb() orders the tag read against later status reads.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	if (!tg3_has_work(tp)) {
		netif_rx_complete(netdev, napi);
		tg3_restart_ints(tp);
	}

	return work_done;
}
3607
/* Mark the driver as synchronizing with the IRQ handlers and wait for
 * any handler already running to finish.  Afterwards the handlers see
 * irq_sync set (via tg3_irq_sync()) and refuse to schedule NAPI.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3617
/* Nonzero while tg3_irq_quiesce() has interrupt processing shut off. */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3622
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Take tp->lock with bottom halves disabled; optionally wait for
	 * in-flight interrupt handlers via tg3_irq_quiesce().
	 */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3634
/* Release the lock taken by tg3_full_lock().  Does not clear irq_sync;
 * callers that quiesced IRQs reset that separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3639
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3656
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	/* MSI is not shared, so this interrupt is always ours. */
	return IRQ_RETVAL(1);
}
3681
/* INTx interrupt handler (non-tagged status blocks).  Returns IRQ_NONE
 * when the interrupt was raised by another device on a shared line.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Ack the status block before handing work to NAPI. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3730
/* INTx interrupt handler for chips using tagged status blocks: an
 * unchanged status_tag (vs. tp->last_tag) means no new events, so the
 * interrupt was not ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3778
3779 /* ISR for interrupt test */
3780 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3781 {
3782         struct net_device *dev = dev_id;
3783         struct tg3 *tp = netdev_priv(dev);
3784         struct tg3_hw_status *sblk = tp->hw_status;
3785
3786         if ((sblk->status & SD_STATUS_UPDATED) ||
3787             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3788                 tg3_disable_ints(tp);
3789                 return IRQ_RETVAL(1);
3790         }
3791         return IRQ_RETVAL(0);
3792 }
3793
3794 static int tg3_init_hw(struct tg3 *, int);
3795 static int tg3_halt(struct tg3 *, int, int);
3796
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the chip is halted and the netdev closed; the full
 * lock is dropped around dev_close() and reacquired so the caller's
 * unlock still balances.  Returns 0 or the tg3_init_hw() error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		/* Clear irq_sync manually; tg3_full_unlock() does not. */
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3818
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx interrupt handler synchronously. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3827
/* Process-context worker that halts and fully re-initializes the chip.
 * Scheduled from tg3_tx_timeout() and other recovery paths.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* tg3_netif_stop() can sleep (napi_disable), so it must run
	 * without the lock held; re-acquire with irq_sync afterwards.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX hang recovery: fall back to flushed mailbox writes
		 * in case posted-write reordering caused the stall.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3868
/* Dump a few key MAC and DMA engine status registers to the log to
 * aid debugging of TX timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3876
/* net_device watchdog callback: the stack believes TX is hung, so log
 * some hardware state (if tx_err messages are enabled) and defer a
 * full chip reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3889
3890 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3891 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3892 {
3893         u32 base = (u32) mapping & 0xffffffff;
3894
3895         return ((base > 0xffffdcc0) &&
3896                 (base + len + 8 < base));
3897 }
3898
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only relevant on platforms that can hand out bus addresses
	 * above 40 bits, and only for chips with the 40-bit DMA bug.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3911
3912 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3913
3914 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3915 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3916                                        u32 last_plus_one, u32 *start,
3917                                        u32 base_flags, u32 mss)
3918 {
3919         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3920         dma_addr_t new_addr = 0;
3921         u32 entry = *start;
3922         int i, ret = 0;
3923
3924         if (!new_skb) {
3925                 ret = -1;
3926         } else {
3927                 /* New SKB is guaranteed to be linear. */
3928                 entry = *start;
3929                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3930                                           PCI_DMA_TODEVICE);
3931                 /* Make sure new skb does not cross any 4G boundaries.
3932                  * Drop the packet if it does.
3933                  */
3934                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3935                         ret = -1;
3936                         dev_kfree_skb(new_skb);
3937                         new_skb = NULL;
3938                 } else {
3939                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3940                                     base_flags, 1 | (mss << 1));
3941                         *start = NEXT_TX(entry);
3942                 }
3943         }
3944
3945         /* Now clean up the sw ring entries. */
3946         i = 0;
3947         while (entry != last_plus_one) {
3948                 int len;
3949
3950                 if (i == 0)
3951                         len = skb_headlen(skb);
3952                 else
3953                         len = skb_shinfo(skb)->frags[i-1].size;
3954                 pci_unmap_single(tp->pdev,
3955                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3956                                  len, PCI_DMA_TODEVICE);
3957                 if (i == 0) {
3958                         tp->tx_buffers[entry].skb = new_skb;
3959                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3960                 } else {
3961                         tp->tx_buffers[entry].skb = NULL;
3962                 }
3963                 entry = NEXT_TX(entry);
3964                 i++;
3965         }
3966
3967         dev_kfree_skb(skb);
3968
3969         return ret;
3970 }
3971
3972 static void tg3_set_txd(struct tg3 *tp, int entry,
3973                         dma_addr_t mapping, int len, u32 flags,
3974                         u32 mss_and_is_end)
3975 {
3976         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3977         int is_end = (mss_and_is_end & 0x1);
3978         u32 mss = (mss_and_is_end >> 1);
3979         u32 vlan_tag = 0;
3980
3981         if (is_end)
3982                 flags |= TXD_FLAG_END;
3983         if (flags & TXD_FLAG_VLAN) {
3984                 vlan_tag = flags >> 16;
3985                 flags &= 0xffff;
3986         }
3987         vlan_tag |= (mss << TXD_MSS_SHIFT);
3988
3989         txd->addr_hi = ((u64) mapping >> 32);
3990         txd->addr_lo = ((u64) mapping & 0xffffffff);
3991         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3992         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3993 }
3994
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO rewrites the headers below, so a cloned header
		 * area must be made private first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* The header length is encoded into the upper bits of
		 * the descriptor mss field (shifted by 9).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* HW_TSO_2 hardware fills in the TCP checksum itself. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with TX
		 * reclaim freeing descriptors in between.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4113
4114 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4115
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		/* Reclaim freed enough space after all; resume. */
		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off, then hand each
	 * MTU-sized segment to the normal transmit path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4148
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO rewrites the headers below; unclone them first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Chips with the TSO bug mis-handle headers longer than
		 * 80 bytes; divert such packets to the GSO workaround.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO fills in the TCP checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option word counts where this chip
		 * variant expects them: mss field vs. base_flags.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Any fragment tripping the 4G or 40-bit errata
			 * forces the whole packet through the slow
			 * linearize-and-copy workaround below.
			 */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with TX
		 * reclaim freeing descriptors in between.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4322
4323 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4324                                int new_mtu)
4325 {
4326         dev->mtu = new_mtu;
4327
4328         if (new_mtu > ETH_DATA_LEN) {
4329                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4330                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4331                         ethtool_op_set_tso(dev, 0);
4332                 }
4333                 else
4334                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4335         } else {
4336                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4337                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4338                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4339         }
4340 }
4341
/* net_device change_mtu hook.  Validates the requested MTU and, if
 * the interface is up, performs a full halt and re-init so the RX
 * rings are re-sized for the new packet size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4375
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard-sized RX buffers. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX buffers (may all be NULL when jumbo is disabled). */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: a packet occupies one head entry (with the skb
	 * pointer) plus one entry per page fragment.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Unmap the fragment entries that follow the head. */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4447
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM when no RX buffers at all could be
 * allocated.  May shrink rx_pending/rx_jumbo_pending on partial
 * allocation failure.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips have no jumbo ring; they use oversized
	 * standard buffers for jumbo MTUs instead.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4537
4538 /*
4539  * Must not be invoked with interrupt sources disabled and
4540  * the hardware shutdown down.
4541  */
4542 static void tg3_free_consistent(struct tg3 *tp)
4543 {
4544         kfree(tp->rx_std_buffers);
4545         tp->rx_std_buffers = NULL;
4546         if (tp->rx_std) {
4547                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4548                                     tp->rx_std, tp->rx_std_mapping);
4549                 tp->rx_std = NULL;
4550         }
4551         if (tp->rx_jumbo) {
4552                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4553                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4554                 tp->rx_jumbo = NULL;
4555         }
4556         if (tp->rx_rcb) {
4557                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4558                                     tp->rx_rcb, tp->rx_rcb_mapping);
4559                 tp->rx_rcb = NULL;
4560         }
4561         if (tp->tx_ring) {
4562                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4563                         tp->tx_ring, tp->tx_desc_mapping);
4564                 tp->tx_ring = NULL;
4565         }
4566         if (tp->hw_status) {
4567                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4568                                     tp->hw_status, tp->status_mapping);
4569                 tp->hw_status = NULL;
4570         }
4571         if (tp->hw_stats) {
4572                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4573                                     tp->hw_stats, tp->stats_mapping);
4574                 tp->hw_stats = NULL;
4575         }
4576 }
4577
4578 /*
4579  * Must not be invoked with interrupt sources disabled and
4580  * the hardware shutdown down.  Can sleep.
4581  */
4582 static int tg3_alloc_consistent(struct tg3 *tp)
4583 {
4584         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4585                                       (TG3_RX_RING_SIZE +
4586                                        TG3_RX_JUMBO_RING_SIZE)) +
4587                                      (sizeof(struct tx_ring_info) *
4588                                       TG3_TX_RING_SIZE),
4589                                      GFP_KERNEL);
4590         if (!tp->rx_std_buffers)
4591                 return -ENOMEM;
4592
4593         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4594         tp->tx_buffers = (struct tx_ring_info *)
4595                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4596
4597         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4598                                           &tp->rx_std_mapping);
4599         if (!tp->rx_std)
4600                 goto err_out;
4601
4602         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4603                                             &tp->rx_jumbo_mapping);
4604
4605         if (!tp->rx_jumbo)
4606                 goto err_out;
4607
4608         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4609                                           &tp->rx_rcb_mapping);
4610         if (!tp->rx_rcb)
4611                 goto err_out;
4612
4613         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4614                                            &tp->tx_desc_mapping);
4615         if (!tp->tx_ring)
4616                 goto err_out;
4617
4618         tp->hw_status = pci_alloc_consistent(tp->pdev,
4619                                              TG3_HW_STATUS_SIZE,
4620                                              &tp->status_mapping);
4621         if (!tp->hw_status)
4622                 goto err_out;
4623
4624         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4625                                             sizeof(struct tg3_hw_stats),
4626                                             &tp->stats_mapping);
4627         if (!tp->hw_stats)
4628                 goto err_out;
4629
4630         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4631         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4632
4633         return 0;
4634
4635 err_out:
4636         tg3_free_consistent(tp);
4637         return -ENOMEM;
4638 }
4639
4640 #define MAX_WAIT_CNT 1000
4641
4642 /* To stop a block, clear the enable bit and poll till it
4643  * clears.  tp->lock is held.
4644  */
4645 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4646 {
4647         unsigned int i;
4648         u32 val;
4649
4650         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4651                 switch (ofs) {
4652                 case RCVLSC_MODE:
4653                 case DMAC_MODE:
4654                 case MBFREE_MODE:
4655                 case BUFMGR_MODE:
4656                 case MEMARB_MODE:
4657                         /* We can't enable/disable these bits of the
4658                          * 5705/5750, just say success.
4659                          */
4660                         return 0;
4661
4662                 default:
4663                         break;
4664                 };
4665         }
4666
4667         val = tr32(ofs);
4668         val &= ~enable_bit;
4669         tw32_f(ofs, val);
4670
4671         for (i = 0; i < MAX_WAIT_CNT; i++) {
4672                 udelay(100);
4673                 val = tr32(ofs);
4674                 if ((val & enable_bit) == 0)
4675                         break;
4676         }
4677
4678         if (i == MAX_WAIT_CNT && !silent) {
4679                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4680                        "ofs=%lx enable_bit=%x\n",
4681                        ofs, enable_bit);
4682                 return -ENODEV;
4683         }
4684
4685         return 0;
4686 }
4687
/* Stop all DMA and MAC activity so the chip can be safely reset.
 * tp->lock is held.
 *
 * The sequence matters: the receive path is quiesced first (RX mode,
 * then the receive-side blocks), then the send-side blocks and read
 * DMA, then the MAC TX engine, and finally the host-coalescing, write
 * DMA and buffer-manager blocks, finishing with a full FTQ reset.
 * Individual tg3_stop_block() failures are OR-ed into err rather than
 * aborting the sequence, so as much of the chip as possible is stopped.
 *
 * Returns 0, or a negative value if any block failed to stop.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new receive traffic first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side blocks and read DMA. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Stop the MAC transmitter and poll for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and free-list blocks last. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the (now quiescent) status and statistics blocks. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4750
4751 /* tp->lock is held. */
4752 static int tg3_nvram_lock(struct tg3 *tp)
4753 {
4754         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4755                 int i;
4756
4757                 if (tp->nvram_lock_cnt == 0) {
4758                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4759                         for (i = 0; i < 8000; i++) {
4760                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4761                                         break;
4762                                 udelay(20);
4763                         }
4764                         if (i == 8000) {
4765                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4766                                 return -ENODEV;
4767                         }
4768                 }
4769                 tp->nvram_lock_cnt++;
4770         }
4771         return 0;
4772 }
4773
4774 /* tp->lock is held. */
4775 static void tg3_nvram_unlock(struct tg3 *tp)
4776 {
4777         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4778                 if (tp->nvram_lock_cnt > 0)
4779                         tp->nvram_lock_cnt--;
4780                 if (tp->nvram_lock_cnt == 0)
4781                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4782         }
4783 }
4784
4785 /* tp->lock is held. */
4786 static void tg3_enable_nvram_access(struct tg3 *tp)
4787 {
4788         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4789             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4790                 u32 nvaccess = tr32(NVRAM_ACCESS);
4791
4792                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4793         }
4794 }
4795
4796 /* tp->lock is held. */
4797 static void tg3_disable_nvram_access(struct tg3 *tp)
4798 {
4799         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4800             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4801                 u32 nvaccess = tr32(NVRAM_ACCESS);
4802
4803                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4804         }
4805 }
4806
/* Post an event to the APE (Application Processing Engine) firmware.
 *
 * Returns silently if the APE shared-memory signature or firmware
 * status word shows no ready APE firmware.  Otherwise it waits up to
 * ~1ms (10 x 100us) for any previous event to be consumed, queues the
 * new event with the PENDING flag set while holding the APE memory
 * lock, and finally rings the APE doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless APE firmware is present and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot is free: queue our event with the PENDING flag,
		 * still under the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually queued
	 * (i.e. we saw the PENDING slot free at some point above).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4842
4843 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4844 {
4845         u32 event;
4846         u32 apedata;
4847
4848         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4849                 return;
4850
4851         switch (kind) {
4852                 case RESET_KIND_INIT:
4853                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4854                                         APE_HOST_SEG_SIG_MAGIC);
4855                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4856                                         APE_HOST_SEG_LEN_MAGIC);
4857                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4858                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4859                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4860                                         APE_HOST_DRIVER_ID_MAGIC);
4861                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4862                                         APE_HOST_BEHAV_NO_PHYLOCK);
4863
4864                         event = APE_EVENT_STATUS_STATE_START;
4865                         break;
4866                 case RESET_KIND_SHUTDOWN:
4867                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4868                         break;
4869                 case RESET_KIND_SUSPEND:
4870                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4871                         break;
4872                 default:
4873                         return;
4874         }
4875
4876         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4877
4878         tg3_ape_send_event(tp, event);
4879 }
4880
/* Signal firmware (and, where applicable, the APE) that a chip reset
 * is about to happen.  tp->lock is held.
 *
 * Always writes the reset magic into the firmware mailbox so
 * tg3_poll_fw() can later detect firmware completion.  With the new
 * ASF handshake, the driver state mailbox is additionally updated to
 * reflect the reset kind.  The APE is told about INIT and SUSPEND
 * here; SHUTDOWN is reported post-reset instead (see
 * tg3_write_sig_post_reset()).
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		};
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
4913
4914 /* tp->lock is held. */
4915 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4916 {
4917         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4918                 switch (kind) {
4919                 case RESET_KIND_INIT:
4920                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4921                                       DRV_STATE_START_DONE);
4922                         break;
4923
4924                 case RESET_KIND_SHUTDOWN:
4925                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4926                                       DRV_STATE_UNLOAD_DONE);
4927                         break;
4928
4929                 default:
4930                         break;
4931                 };
4932         }
4933
4934         if (kind == RESET_KIND_SHUTDOWN)
4935                 tg3_ape_driver_state_change(tp, kind);
4936 }
4937
4938 /* tp->lock is held. */
4939 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4940 {
4941         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4942                 switch (kind) {
4943                 case RESET_KIND_INIT:
4944                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4945                                       DRV_STATE_START);
4946                         break;
4947
4948                 case RESET_KIND_SHUTDOWN:
4949                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4950                                       DRV_STATE_UNLOAD);
4951                         break;
4952
4953                 case RESET_KIND_SUSPEND:
4954                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4955                                       DRV_STATE_SUSPEND);
4956                         break;
4957
4958                 default:
4959                         break;
4960                 };
4961         }
4962 }
4963
/* Wait for the chip's boot firmware to finish initializing after a
 * reset.
 *
 * 5906 parts use a VCPU: poll VCPU_STATUS for up to 20ms and fail
 * hard if init-done never appears.  All other parts are polled via
 * the firmware mailbox, waiting for the ones-complement of the magic
 * value written by tg3_write_sig_pre_reset().  A mailbox timeout is
 * NOT treated as an error, because some boards legitimately ship
 * without firmware; it is merely logged once.
 *
 * Returns 0, or -ENODEV for a 5906 VCPU init timeout.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5002
5003 /* Save PCI command register before chip reset */
5004 static void tg3_save_pci_state(struct tg3 *tp)
5005 {
5006         u32 val;
5007
5008         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
5009         tp->pci_cmd = val;
5010 }
5011
/* Restore PCI state after chip reset.
 *
 * Reinstates indirect register access, the PCISTATE retry/APE access
 * settings, and the saved PCI command register; clears PCI-X relaxed
 * ordering; and on 5780-class chips re-enables MSI, which the reset
 * clears.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5064
5065 static void tg3_stop_fw(struct tg3 *);
5066
/* Perform a full core-clock reset of the chip via GRC_MISC_CFG,
 * then bring the basics back up: PCI config state, memory arbiter,
 * GRC mode, MAC port mode, and firmware handshake.  tp->lock is held.
 *
 * Returns 0 on success, or the error from tg3_poll_fw() if the
 * firmware never signals completion.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are raw,
		 * undocumented PCIe workaround offsets/bits -- do not
		 * change without hardware documentation.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and let it run. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): 0xc4 here is a raw PCIe config
			 * offset; bit 15 meaning is undocumented in
			 * this file.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* Safe for the irq handler to touch the chip again. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter, preserving existing mode bits
	 * on 5780-class chips.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): raw register write, 5750 A3 specific. */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): 0xc4 MMIO workaround for 5705 A0. */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode to match the PHY attachment. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for on-chip firmware (if any) to come back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 -- undocumented PCIe
		 * workaround.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5254
/* Ask on-chip ASF firmware to pause itself ahead of a reset.
 * tp->lock is held.
 *
 * Only applies when ASF is enabled and management traffic is not
 * handled by the APE.  Writes FWCMD_NICDRV_PAUSE_FW into the firmware
 * command mailbox, raises the driver-event bit in GRC_RX_CPU_EVENT,
 * and gives the RX CPU up to 100us to acknowledge by clearing it.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* Bit 14 flags a driver event to the RX CPU.
		 * NOTE(review): magic bit, not named in this file --
		 * confirm against tg3.h.
		 */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5276
/* Fully halt the chip: pause firmware, signal the impending reset,
 * quiesce the hardware, reset the core, then post the reset-complete
 * signatures.  tp->lock is held.
 *
 * Returns the result of tg3_chip_reset() (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Signatures are written even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5297
5298 #define TG3_FW_RELEASE_MAJOR    0x0
5299 #define TG3_FW_RELASE_MINOR     0x0
5300 #define TG3_FW_RELEASE_FIX      0x0
5301 #define TG3_FW_START_ADDR       0x08000000
5302 #define TG3_FW_TEXT_ADDR        0x08000000
5303 #define TG3_FW_TEXT_LEN         0x9c0
5304 #define TG3_FW_RODATA_ADDR      0x080009c0
5305 #define TG3_FW_RODATA_LEN       0x60
5306 #define TG3_FW_DATA_ADDR        0x08000a40
5307 #define TG3_FW_DATA_LEN         0x20
5308 #define TG3_FW_SBSS_ADDR        0x08000a60
5309 #define TG3_FW_SBSS_LEN         0xc
5310 #define TG3_FW_BSS_ADDR         0x08000a70
5311 #define TG3_FW_BSS_LEN          0x10
5312
5313 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5314         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5315         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5316         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5317         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5318         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5319         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5320         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5321         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5322         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5323         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5324         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5325         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5326         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5327         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5328         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5329         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5330         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5331         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5332         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5333         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5334         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5335         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5336         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5337         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5338         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5339         0, 0, 0, 0, 0, 0,
5340         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5341         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5342         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5343         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5344         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5345         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5346         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5347         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5348         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5349         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5350         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5351         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5352         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5353         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5354         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5355         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5356         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5357         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5358         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5359         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5360         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5361         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5362         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5363         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5364         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5365         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5366         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5367         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5368         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5369         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5370         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5371         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5372         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5373         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5374         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5375         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5376         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5377         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5378         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5379         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5380         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5381         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5382         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5383         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5384         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5385         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5386         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5387         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5388         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5389         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5390         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5391         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5392         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5393         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5394         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5395         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5396         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5397         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5398         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5399         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5400         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5401         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5402         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5403         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5404         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5405 };
5406
/* Read-only data section of the 5701 A0 fix firmware.  The words are
 * packed ASCII message strings used by the firmware itself (e.g.
 * "5701rlsA", "SwEvent0", "fatalErr", "MainCpuB") — do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5414
#if 0 /* All zeros, don't eat up space with it. */
/* Omitting this table is safe: tg3_load_5701_a0_firmware_fix() passes a
 * NULL data pointer for the .data section, and tg3_load_firmware_cpu()
 * writes zeros whenever the section's data pointer is NULL.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5421
5422 #define RX_CPU_SCRATCH_BASE     0x30000
5423 #define RX_CPU_SCRATCH_SIZE     0x04000
5424 #define TX_CPU_SCRATCH_BASE     0x34000
5425 #define TX_CPU_SCRATCH_SIZE     0x04000
5426
5427 /* tp->lock is held. */
5428 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5429 {
5430         int i;
5431
5432         BUG_ON(offset == TX_CPU_BASE &&
5433             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5434
5435         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5436                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5437
5438                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5439                 return 0;
5440         }
5441         if (offset == RX_CPU_BASE) {
5442                 for (i = 0; i < 10000; i++) {
5443                         tw32(offset + CPU_STATE, 0xffffffff);
5444                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5445                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5446                                 break;
5447                 }
5448
5449                 tw32(offset + CPU_STATE, 0xffffffff);
5450                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5451                 udelay(10);
5452         } else {
5453                 for (i = 0; i < 10000; i++) {
5454                         tw32(offset + CPU_STATE, 0xffffffff);
5455                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5456                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5457                                 break;
5458                 }
5459         }
5460
5461         if (i >= 10000) {
5462                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5463                        "and %s CPU\n",
5464                        tp->dev->name,
5465                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5466                 return -ENODEV;
5467         }
5468
5469         /* Clear firmware's nvram arbitration. */
5470         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5471                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5472         return 0;
5473 }
5474
/* Describes one firmware image as three (base address, byte length,
 * word data) sections.  A NULL *_data pointer means the section is all
 * zeros — tg3_load_firmware_cpu() writes zeros for it.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;
};
5486
5487 /* tp->lock is held. */
5488 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5489                                  int cpu_scratch_size, struct fw_info *info)
5490 {
5491         int err, lock_err, i;
5492         void (*write_op)(struct tg3 *, u32, u32);
5493
5494         if (cpu_base == TX_CPU_BASE &&
5495             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5496                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5497                        "TX cpu firmware on %s which is 5705.\n",
5498                        tp->dev->name);
5499                 return -EINVAL;
5500         }
5501
5502         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5503                 write_op = tg3_write_mem;
5504         else
5505                 write_op = tg3_write_indirect_reg32;
5506
5507         /* It is possible that bootcode is still loading at this point.
5508          * Get the nvram lock first before halting the cpu.
5509          */
5510         lock_err = tg3_nvram_lock(tp);
5511         err = tg3_halt_cpu(tp, cpu_base);
5512         if (!lock_err)
5513                 tg3_nvram_unlock(tp);
5514         if (err)
5515                 goto out;
5516
5517         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5518                 write_op(tp, cpu_scratch_base + i, 0);
5519         tw32(cpu_base + CPU_STATE, 0xffffffff);
5520         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5521         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5522                 write_op(tp, (cpu_scratch_base +
5523                               (info->text_base & 0xffff) +
5524                               (i * sizeof(u32))),
5525                          (info->text_data ?
5526                           info->text_data[i] : 0));
5527         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5528                 write_op(tp, (cpu_scratch_base +
5529                               (info->rodata_base & 0xffff) +
5530                               (i * sizeof(u32))),
5531                          (info->rodata_data ?
5532                           info->rodata_data[i] : 0));
5533         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5534                 write_op(tp, (cpu_scratch_base +
5535                               (info->data_base & 0xffff) +
5536                               (i * sizeof(u32))),
5537                          (info->data_data ?
5538                           info->data_data[i] : 0));
5539
5540         err = 0;
5541
5542 out:
5543         return err;
5544 }
5545
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU running from
 * TG3_FW_TEXT_ADDR.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	/* .data is all zeros; NULL makes the loader write zeros
	 * (tg3FwData is compiled out above).
	 */
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Retry up to 5 times: re-halt the CPU and rewrite the PC until
	 * it reads back as the firmware entry point.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Clear CPU_MODE (drops CPU_MODE_HALT) so the RX CPU runs. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5598
5599
5600 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5601 #define TG3_TSO_FW_RELASE_MINOR         0x6
5602 #define TG3_TSO_FW_RELEASE_FIX          0x0
5603 #define TG3_TSO_FW_START_ADDR           0x08000000
5604 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5605 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5606 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5607 #define TG3_TSO_FW_RODATA_LEN           0x60
5608 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5609 #define TG3_TSO_FW_DATA_LEN             0x30
5610 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5611 #define TG3_TSO_FW_SBSS_LEN             0x2c
5612 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5613 #define TG3_TSO_FW_BSS_LEN              0x894
5614
5615 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5616         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5617         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5618         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5619         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5620         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5621         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5622         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5623         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5624         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5625         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5626         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5627         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5628         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5629         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5630         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5631         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5632         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5633         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5634         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5635         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5636         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5637         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5638         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5639         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5640         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5641         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5642         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5643         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5644         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5645         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5646         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5647         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5648         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5649         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5650         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5651         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5652         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5653         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5654         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5655         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5656         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5657         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5658         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5659         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5660         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5661         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5662         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5663         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5664         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5665         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5666         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5667         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5668         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5669         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5670         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5671         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5672         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5673         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5674         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5675         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5676         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5677         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5678         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5679         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5680         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5681         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5682         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5683         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5684         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5685         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5686         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5687         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5688         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5689         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5690         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5691         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5692         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800,