/* [TG3]: Add 5784 and 5764 support.
 * Source: linux-2.6.git / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.81"
68 #define DRV_MODULE_RELDATE      "September 5, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI device IDs claimed by this driver: every supported Tigon3 variant
 * (including the newly added 5784 and 5764), plus SysKonnect, Altima and
 * Apple boards built on the same silicon.  Terminated by an empty entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	/* New in this revision: 5784 and 5764 support. */
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* terminating entry */
};
212
213 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
214
/* Names reported for ETHTOOL_GSTATS; the order must match the u64 layout
 * of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
295
/* Names for the ETHTOOL_TEST self-test results; order must match the
 * order the tests are run in (TG3_NUM_TEST entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
306
/* Plain posted write of a 32-bit chip register via the MMIO window. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
311
312 static u32 tg3_read32(struct tg3 *tp, u32 off)
313 {
314         return (readl(tp->regs + off));
315 }
316
/* Write a chip register indirectly through PCI config space, for chips
 * or buses where direct MMIO is unsafe.  The BASE_ADDR/DATA config
 * register pair is shared state, so the two writes must be atomic with
 * respect to other indirect accesses -- hence indirect_lock.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
326
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
332
333 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
334 {
335         unsigned long flags;
336         u32 val;
337
338         spin_lock_irqsave(&tp->indirect_lock, flags);
339         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
341         spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         return val;
343 }
344
/* Write a mailbox register in indirect mode.  Two mailboxes (the RX
 * return-ring consumer index and the standard-ring producer index) have
 * dedicated config-space aliases and are written directly; everything
 * else goes through the BASE_ADDR/DATA window at offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
374
375 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
376 {
377         unsigned long flags;
378         u32 val;
379
380         spin_lock_irqsave(&tp->indirect_lock, flags);
381         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
382         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
383         spin_unlock_irqrestore(&tp->indirect_lock, flags);
384         return val;
385 }
386
387 /* usec_wait specifies the wait time in usec when writing to certain registers
388  * where it is unsafe to read back the register without some delay.
389  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
390  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
391  */
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: the write completes before return,
		 * so no read-back flush is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: delay, then read back to force the
		 * write out of any posting buffers.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
411
/* Mailbox write with a flushing read-back, skipped on configurations
 * (write-reorder buggy chipsets, ICH workaround) where the read-back
 * itself is unsafe or unnecessary.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
419
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chipsets that reorder mailbox writes need a
 * read-back to enforce ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
429
430 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
431 {
432         return (readl(tp->regs + off + GRCMBOX_BASE));
433 }
434
/* 5906 mailbox write via the GRC mailbox aperture. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
439
/* Convenience accessors: route register and mailbox I/O through the
 * method pointers chosen at probe time (direct, flushed, or indirect).
 * The _f variants flush the write; _wait_f additionally delays.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
450
/* Write a word into NIC on-chip SRAM through the memory window.
 * On the 5906 the statistics block region is not writable, so such
 * writes are silently dropped.  Uses config-space accesses when the
 * SRAM_USE_CONFIG flag is set, MMIO otherwise; either way the window
 * base is restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
475
/* Read a word from NIC on-chip SRAM through the memory window.
 * The 5906 statistics block region cannot be read this way, so *val is
 * forced to zero there.  Mirrors tg3_write_mem's config-space/MMIO
 * selection and always restores the window base to zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
502
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to the interrupt
 * mailbox to disable further interrupt generation.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
509
/* Force an interrupt if status work is already pending, so no update is
 * lost across an enable.  Non-tagged mode with a pending status update
 * sets the GRC SETINT bit; otherwise the host coalescing engine is
 * kicked with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
519
/* Re-enable chip interrupts: clear irq_sync (wmb orders it before the
 * hardware writes below), unmask PCI interrupts, and ack up to the last
 * processed status tag.  1-shot MSI mode requires the mailbox write to
 * be issued twice.  Finishes by forcing an interrupt if work is pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
534
535 static inline unsigned int tg3_has_work(struct tg3 *tp)
536 {
537         struct tg3_hw_status *sblk = tp->hw_status;
538         unsigned int work_exists = 0;
539
540         /* check for phy events */
541         if (!(tp->tg3_flags &
542               (TG3_FLAG_USE_LINKCHG_REG |
543                TG3_FLAG_POLL_SERDES))) {
544                 if (sblk->status & SD_STATUS_LINK_CHG)
545                         work_exists = 1;
546         }
547         /* check for RX/TX work to do */
548         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
549             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
550                 work_exists = 1;
551
552         return work_exists;
553 }
554
555 /* tg3_restart_ints
556  *  similar to tg3_enable_ints, but it accurately determines whether there
557  *  is new work pending and can return without flushing the PIO write
558  *  which reenables interrupts
559  */
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write before any subsequent MMIO. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
575
/* Quiesce the data path: stop NAPI polling and the TX queue.  The
 * trans_start touch keeps the watchdog from declaring a TX timeout
 * while the queue is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
582
/* Restart the data path after tg3_netif_stop: wake the TX queue,
 * re-enable NAPI, and re-enable interrupts.  SD_STATUS_UPDATED is set
 * so the first poll sees the status block as fresh.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
594
/* Switch the core clock per chip family.  No-op on CPMU-equipped chips
 * and the 5780 class, which manage clocks themselves.  On 5705+ the
 * 625MHz core bit is re-asserted first if it was set; on older chips a
 * 44MHz/ALTCLK two-step sequence is required before the final value is
 * written.  Each write uses a 40 usec settle time (see _tw32_flush).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
626
627 #define PHY_BUSY_LOOPS  5000
628
/* Read a PHY register over the MI (MDIO) interface.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards.  Busy-waits up to PHY_BUSY_LOOPS iterations for
 * MI_COM_BUSY to clear.  On success stores the 16-bit value in *val and
 * returns 0; on timeout leaves *val zero and returns -EBUSY.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to latch the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
677
/* Write a PHY register over the MI (MDIO) interface.
 * On the 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * ignored (returns 0) -- those registers must not be touched there.
 * Otherwise mirrors tg3_readphy: auto-poll off, issue frame, busy-wait,
 * auto-poll restored.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
726
/* Enable or disable automatic MDI crossover in the PHY.  Only done on
 * 5705+ copper parts (serdes has no MDI-X).  The 5906 internal EPHY
 * uses a shadow-register sequence via MII_TG3_EPHY_TEST; other PHYs use
 * the AUX_CTRL misc shadow with an explicit write-enable bit.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the shadow register window, flip the MDIX
			 * bit, then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
764
/* Enable the PHY "ethernet wirespeed" feature (skipped when the board
 * flags it unsupported).  Performs a read-modify-write of an AUX_CTRL
 * shadow register; 0x7007 selects that shadow page and bits 15/4 enable
 * the feature -- NOTE(review): magic values from vendor code, verify
 * against the Broadcom PHY register documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
777
778 static int tg3_bmcr_reset(struct tg3 *tp)
779 {
780         u32 phy_control;
781         int limit, err;
782
783         /* OK, reset it, and poll the BMCR_RESET bit until it
784          * clears or we time out.
785          */
786         phy_control = BMCR_RESET;
787         err = tg3_writephy(tp, MII_BMCR, phy_control);
788         if (err != 0)
789                 return -EBUSY;
790
791         limit = 5000;
792         while (limit--) {
793                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
794                 if (err != 0)
795                         return -EBUSY;
796
797                 if ((phy_control & BMCR_RESET) == 0) {
798                         udelay(40);
799                         break;
800                 }
801                 udelay(10);
802         }
803         if (limit <= 0)
804                 return -EBUSY;
805
806         return 0;
807 }
808
809 static int tg3_wait_macro_done(struct tg3 *tp)
810 {
811         int limit = 100;
812
813         while (limit--) {
814                 u32 tmp32;
815
816                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
817                         if ((tmp32 & 0x1000) == 0)
818                                 break;
819                 }
820         }
821         if (limit <= 0)
822                 return -EBUSY;
823
824         return 0;
825 }
826
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify that the DSP responds correctly.
 *
 * For each channel: select its pattern address, write the six pattern
 * words, latch them (via register 0x16), then re-select and read the
 * words back as low/high pairs and compare against what was written.
 *
 * Returns 0 when every channel verifies.  Returns -EBUSY either on a
 * macro-done timeout (in which case *resetp is set so the caller
 * performs a fresh PHY reset before retrying) or on a pattern
 * mismatch (a DSP recovery sequence is written first; *resetp is
 * deliberately left unchanged in that case).
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* One six-word test pattern per DSP channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's test-pattern block (channels are
		 * spaced 0x2000 apart in the DSP address space).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the pattern and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the words back in low/high pairs and verify. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			/* Only 15 bits of "low" and 4 bits of "high" are
			 * significant on read-back.
			 */
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write a DSP recovery sequence
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
892
893 static int tg3_phy_reset_chanpat(struct tg3 *tp)
894 {
895         int chan;
896
897         for (chan = 0; chan < 4; chan++) {
898                 int i;
899
900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
901                              (chan * 0x2000) | 0x0200);
902                 tg3_writephy(tp, 0x16, 0x0002);
903                 for (i = 0; i < 6; i++)
904                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
905                 tg3_writephy(tp, 0x16, 0x0202);
906                 if (tg3_wait_macro_done(tp))
907                         return -EBUSY;
908         }
909
910         return 0;
911 }
912
/* PHY reset workaround for 5703/5704/5705-class chips: after a BMCR
 * reset, force a fixed link configuration (1000/full, master), then
 * write and verify DSP test patterns, retrying up to 10 times and
 * re-resetting the PHY whenever the pattern check requests it.
 * Finally the patterns are cleared and the original settings restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized by the restore write
		 * below — confirm whether that path can occur.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);
	/* NOTE(review): if all retries fail, execution falls through
	 * with err != 0 but err is overwritten just below, so a
	 * persistent test-pattern failure can be silently masked.
	 */

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and clear the pattern state. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the saved master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (undo the 0x3000 set). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
988
989 static void tg3_link_report(struct tg3 *);
990
/* Reset the tigon3 PHY and apply any chip-specific workarounds.
 * (The "FORCE argument" mentioned in older versions of this comment
 * no longer exists.)
 */
/* Reset the tigon3 PHY, report loss of carrier if the link was up,
 * and apply the chip- and PHY-revision-specific workarounds that must
 * follow every PHY reset.
 *
 * Returns 0 on success, or a negative errno from the underlying
 * reset/MDIO operations.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the internal EPHY out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: status bits are latched, so the first read
	 * returns stale state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence
		 * instead of a plain BMCR reset.
		 */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset workarounds, selected by PHY erratum flags set at
	 * probe time.  The magic register/value pairs below come from
	 * vendor-provided sequences.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* Written twice intentionally, per the workaround. */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1097
/* Configure the GRC local-control GPIOs that gate auxiliary (Vaux)
 * power.  If either this device or its peer (the other port of a
 * dual-port 5704/5714) needs WoL or ASF, the GPIOs are driven to keep
 * aux power on; otherwise they are sequenced to release it.  Only
 * applies to real NICs (not LOM designs).
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Aux power must stay available: drive the GPIOs on. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer handle the GPIOs if it has
			 * already completed initialization.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step sequence: assert, raise OUTPUT0,
			 * then drop OUTPUT2 — each with a settle wait.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: release it. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1: drive high, low, then high again. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1193
1194 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1195 {
1196         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1197                 return 1;
1198         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1199                 if (speed != SPEED_10)
1200                         return 1;
1201         } else if (speed == SPEED_10)
1202                 return 1;
1203
1204         return 0;
1205 }
1206
1207 static int tg3_setup_phy(struct tg3 *, int);
1208
1209 #define RESET_KIND_SHUTDOWN     0
1210 #define RESET_KIND_INIT         1
1211 #define RESET_KIND_SUSPEND      2
1212
1213 static void tg3_write_sig_post_reset(struct tg3 *, int);
1214 static int tg3_halt_cpu(struct tg3 *, u32);
1215 static int tg3_nvram_lock(struct tg3 *);
1216 static void tg3_nvram_unlock(struct tg3 *);
1217
/* Put the PHY (or SerDes block) into its lowest-power state, honoring
 * the chip revisions on which a full power-down is known to be buggy.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			/* Park the 5704 SerDes: hand autoneg to hardware,
			 * assert soft reset, and set SERDES_CFG bit 15
			 * (power-down, presumably — confirm with docs).
			 */
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		/* Fiber PHYs take no MII power-down. */
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: reset the EPHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the link LED off and write the aux-control
		 * low-power value before powering down.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1257
1258 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1259 {
1260         u32 misc_host_ctrl;
1261         u16 power_control, power_caps;
1262         int pm = tp->pm_cap;
1263
1264         /* Make sure register accesses (indirect or otherwise)
1265          * will function correctly.
1266          */
1267         pci_write_config_dword(tp->pdev,
1268                                TG3PCI_MISC_HOST_CTRL,
1269                                tp->misc_host_ctrl);
1270
1271         pci_read_config_word(tp->pdev,
1272                              pm + PCI_PM_CTRL,
1273                              &power_control);
1274         power_control |= PCI_PM_CTRL_PME_STATUS;
1275         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1276         switch (state) {
1277         case PCI_D0:
1278                 power_control |= 0;
1279                 pci_write_config_word(tp->pdev,
1280                                       pm + PCI_PM_CTRL,
1281                                       power_control);
1282                 udelay(100);    /* Delay after power state change */
1283
1284                 /* Switch out of Vaux if it is a NIC */
1285                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1286                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1287
1288                 return 0;
1289
1290         case PCI_D1:
1291                 power_control |= 1;
1292                 break;
1293
1294         case PCI_D2:
1295                 power_control |= 2;
1296                 break;
1297
1298         case PCI_D3hot:
1299                 power_control |= 3;
1300                 break;
1301
1302         default:
1303                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1304                        "requested.\n",
1305                        tp->dev->name, state);
1306                 return -EINVAL;
1307         };
1308
1309         power_control |= PCI_PM_CTRL_PME_ENABLE;
1310
1311         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1312         tw32(TG3PCI_MISC_HOST_CTRL,
1313              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1314
1315         if (tp->link_config.phy_is_low_power == 0) {
1316                 tp->link_config.phy_is_low_power = 1;
1317                 tp->link_config.orig_speed = tp->link_config.speed;
1318                 tp->link_config.orig_duplex = tp->link_config.duplex;
1319                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1320         }
1321
1322         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1323                 tp->link_config.speed = SPEED_10;
1324                 tp->link_config.duplex = DUPLEX_HALF;
1325                 tp->link_config.autoneg = AUTONEG_ENABLE;
1326                 tg3_setup_phy(tp, 0);
1327         }
1328
1329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1330                 u32 val;
1331
1332                 val = tr32(GRC_VCPU_EXT_CTRL);
1333                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1334         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1335                 int i;
1336                 u32 val;
1337
1338                 for (i = 0; i < 200; i++) {
1339                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1340                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1341                                 break;
1342                         msleep(1);
1343                 }
1344         }
1345         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1346                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1347                                                      WOL_DRV_STATE_SHUTDOWN |
1348                                                      WOL_DRV_WOL |
1349                                                      WOL_SET_MAGIC_PKT);
1350
1351         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1352
1353         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1354                 u32 mac_mode;
1355
1356                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1357                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1358                         udelay(40);
1359
1360                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1361                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1362                         else
1363                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1364
1365                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1366                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1367                             ASIC_REV_5700) {
1368                                 u32 speed = (tp->tg3_flags &
1369                                              TG3_FLAG_WOL_SPEED_100MB) ?
1370                                              SPEED_100 : SPEED_10;
1371                                 if (tg3_5700_link_polarity(tp, speed))
1372                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1373                                 else
1374                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1375                         }
1376                 } else {
1377                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1378                 }
1379
1380                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1381                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1382
1383                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1384                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1385                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1386
1387                 tw32_f(MAC_MODE, mac_mode);
1388                 udelay(100);
1389
1390                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1391                 udelay(10);
1392         }
1393
1394         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1395             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1396              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1397                 u32 base_val;
1398
1399                 base_val = tp->pci_clock_ctrl;
1400                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1401                              CLOCK_CTRL_TXCLK_DISABLE);
1402
1403                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1404                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1405         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1406                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1407                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1408                 /* do nothing */
1409         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1410                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1411                 u32 newbits1, newbits2;
1412
1413                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1414                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1415                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1416                                     CLOCK_CTRL_TXCLK_DISABLE |
1417                                     CLOCK_CTRL_ALTCLK);
1418                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1419                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1420                         newbits1 = CLOCK_CTRL_625_CORE;
1421                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1422                 } else {
1423                         newbits1 = CLOCK_CTRL_ALTCLK;
1424                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1425                 }
1426
1427                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1428                             40);
1429
1430                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1431                             40);
1432
1433                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1434                         u32 newbits3;
1435
1436                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1437                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1438                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1439                                             CLOCK_CTRL_TXCLK_DISABLE |
1440                                             CLOCK_CTRL_44MHZ_CORE);
1441                         } else {
1442                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1443                         }
1444
1445                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1446                                     tp->pci_clock_ctrl | newbits3, 40);
1447                 }
1448         }
1449
1450         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1451             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1452                 tg3_power_down_phy(tp);
1453
1454         tg3_frob_aux_power(tp);
1455
1456         /* Workaround for unstable PLL clock */
1457         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1458             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1459                 u32 val = tr32(0x7d00);
1460
1461                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1462                 tw32(0x7d00, val);
1463                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1464                         int err;
1465
1466                         err = tg3_nvram_lock(tp);
1467                         tg3_halt_cpu(tp, RX_CPU_BASE);
1468                         if (!err)
1469                                 tg3_nvram_unlock(tp);
1470                 }
1471         }
1472
1473         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1474
1475         /* Finally, set the new power state. */
1476         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1477         udelay(100);    /* Delay after power state change */
1478
1479         return 0;
1480 }
1481
1482 static void tg3_link_report(struct tg3 *tp)
1483 {
1484         if (!netif_carrier_ok(tp->dev)) {
1485                 if (netif_msg_link(tp))
1486                         printk(KERN_INFO PFX "%s: Link is down.\n",
1487                                tp->dev->name);
1488         } else if (netif_msg_link(tp)) {
1489                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1490                        tp->dev->name,
1491                        (tp->link_config.active_speed == SPEED_1000 ?
1492                         1000 :
1493                         (tp->link_config.active_speed == SPEED_100 ?
1494                          100 : 10)),
1495                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1496                         "full" : "half"));
1497
1498                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1499                        "%s for RX.\n",
1500                        tp->dev->name,
1501                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1502                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1503         }
1504 }
1505
/* Resolve TX/RX pause (flow control) from the local and link-partner
 * advertisement words and program MAC_RX_MODE / MAC_TX_MODE to match.
 * When pause autonegotiation is disabled, the previously configured
 * TG3_FLAG_{RX,TX}_PAUSE flags are applied unchanged.
 *
 * @local_adv:  our MII advertisement register value
 * @remote_adv: link partner ability (LPA) register value
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard IEEE 802.3 pause resolution: combine our
		 * symmetric/asymmetric capability bits with the link
		 * partner's to decide each direction independently.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Autoneg of pause disabled: keep forced settings. */
		new_tg3_flags = tp->tg3_flags;
	}

	/* Apply the result to the MAC, touching the registers only
	 * when a mode word actually changed.
	 */
	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1577
1578 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1579 {
1580         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1581         case MII_TG3_AUX_STAT_10HALF:
1582                 *speed = SPEED_10;
1583                 *duplex = DUPLEX_HALF;
1584                 break;
1585
1586         case MII_TG3_AUX_STAT_10FULL:
1587                 *speed = SPEED_10;
1588                 *duplex = DUPLEX_FULL;
1589                 break;
1590
1591         case MII_TG3_AUX_STAT_100HALF:
1592                 *speed = SPEED_100;
1593                 *duplex = DUPLEX_HALF;
1594                 break;
1595
1596         case MII_TG3_AUX_STAT_100FULL:
1597                 *speed = SPEED_100;
1598                 *duplex = DUPLEX_FULL;
1599                 break;
1600
1601         case MII_TG3_AUX_STAT_1000HALF:
1602                 *speed = SPEED_1000;
1603                 *duplex = DUPLEX_HALF;
1604                 break;
1605
1606         case MII_TG3_AUX_STAT_1000FULL:
1607                 *speed = SPEED_1000;
1608                 *duplex = DUPLEX_FULL;
1609                 break;
1610
1611         default:
1612                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1613                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1614                                  SPEED_10;
1615                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1616                                   DUPLEX_HALF;
1617                         break;
1618                 }
1619                 *speed = SPEED_INVALID;
1620                 *duplex = DUPLEX_INVALID;
1621                 break;
1622         };
1623 }
1624
/* Program the copper PHY's advertisement/control registers according
 * to tp->link_config, then either force the configured speed/duplex
 * (autoneg disabled) or (re)start autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100baseT advertised when WoL must work at 100Mb. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise every mode
		 * permitted by link_config.advertising.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 gigabit parts are set up to act as
			 * master (chip-rev specific workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait up to
			 * ~15ms for the PHY to report loss of link before
			 * applying the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link status is latched; read twice
				 * to get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: enable and (re)start negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1757
1758 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1759 {
1760         int err;
1761
1762         /* Turn off tap power management. */
1763         /* Set Extended packet length bit */
1764         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1765
1766         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1767         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1768
1769         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1770         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1771
1772         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1773         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1774
1775         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1776         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1777
1778         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1779         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1780
1781         udelay(40);
1782
1783         return err;
1784 }
1785
1786 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1787 {
1788         u32 adv_reg, all_mask = 0;
1789
1790         if (mask & ADVERTISED_10baseT_Half)
1791                 all_mask |= ADVERTISE_10HALF;
1792         if (mask & ADVERTISED_10baseT_Full)
1793                 all_mask |= ADVERTISE_10FULL;
1794         if (mask & ADVERTISED_100baseT_Half)
1795                 all_mask |= ADVERTISE_100HALF;
1796         if (mask & ADVERTISED_100baseT_Full)
1797                 all_mask |= ADVERTISE_100FULL;
1798
1799         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1800                 return 0;
1801
1802         if ((adv_reg & all_mask) != all_mask)
1803                 return 0;
1804         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1805                 u32 tg3_ctrl;
1806
1807                 all_mask = 0;
1808                 if (mask & ADVERTISED_1000baseT_Half)
1809                         all_mask |= ADVERTISE_1000HALF;
1810                 if (mask & ADVERTISED_1000baseT_Full)
1811                         all_mask |= ADVERTISE_1000FULL;
1812
1813                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1814                         return 0;
1815
1816                 if ((tg3_ctrl & all_mask) != all_mask)
1817                         return 0;
1818         }
1819         return 1;
1820 }
1821
/* Evaluate the copper PHY link state and program the MAC to match
 * (port mode, duplex, polarity, flow control), applying per-chip PHY
 * quirks along the way.  @force_reset non-zero resets the PHY first.
 * Carrier transitions are propagated to the net stack and logged.
 * Returns 0 normally; a PHY access error code can be returned from
 * the 5401 DSP workaround path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and ack any latched link-status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Restore the MI interface to its base mode. */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR is latched; the double read yields current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: (re)load the DSP patch and
			 * wait up to ~10ms for link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may additionally need a full
			 * PHY reset plus DSP reload if link stayed down.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Select which PHY interrupts stay unmasked. */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Set bit 10 of AUX_CTRL shadow 0x4007 if not already set,
		 * then jump straight to reprogramming the PHY.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll up to ~4ms for link (latched BMSR, double read). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait up to ~20ms for a non-zero AUX status, then decode
		 * the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff presumably indicates
		 * a bad read -- TODO confirm).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: only accept the link if the PHY
			 * matches the requested speed/duplex exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY, then recheck whether link came up. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Select MII (10/100) vs. GMII port mode to match link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700 needs the link-polarity bit tuned per speed. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* On 5700 in PCI-X / high-speed PCI mode with a gigabit link,
	 * write a magic value to the firmware mailbox (NOTE(review):
	 * semantics of MAGIC2 are firmware-defined -- confirm against
	 * the bootcode documentation).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate carrier changes to the net stack and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2098
/* Software state for the fiber (1000BaseX) autonegotiation state
 * machine.  The MR_* flags and ANEG_CFG_* bits mirror management-
 * register / config-word semantics (NOTE(review): bit layout inferred
 * from usage in tg3_fiber_aneg_smachine -- confirm against the IEEE
 * 802.3 clause 37 config-word definition).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and link-partner ability bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time is incremented once per state-machine
	 * invocation; link_time records when the current phase started.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times in a row it has
	 * been seen (used to establish "ability match").
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words sent / received on the wire */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle time, in cur_time ticks, before leaving the RESTART state. */
#define ANEG_STATE_SETTLE_TIME  10000
2162
2163 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2164                                    struct tg3_fiber_aneginfo *ap)
2165 {
2166         unsigned long delta;
2167         u32 rx_cfg_reg;
2168         int ret;
2169
2170         if (ap->state == ANEG_STATE_UNKNOWN) {
2171                 ap->rxconfig = 0;
2172                 ap->link_time = 0;
2173                 ap->cur_time = 0;
2174                 ap->ability_match_cfg = 0;
2175                 ap->ability_match_count = 0;
2176                 ap->ability_match = 0;
2177                 ap->idle_match = 0;
2178                 ap->ack_match = 0;
2179         }
2180         ap->cur_time++;
2181
2182         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2183                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2184
2185                 if (rx_cfg_reg != ap->ability_match_cfg) {
2186                         ap->ability_match_cfg = rx_cfg_reg;
2187                         ap->ability_match = 0;
2188                         ap->ability_match_count = 0;
2189                 } else {
2190                         if (++ap->ability_match_count > 1) {
2191                                 ap->ability_match = 1;
2192                                 ap->ability_match_cfg = rx_cfg_reg;
2193                         }
2194                 }
2195                 if (rx_cfg_reg & ANEG_CFG_ACK)
2196                         ap->ack_match = 1;
2197                 else
2198                         ap->ack_match = 0;
2199
2200                 ap->idle_match = 0;
2201         } else {
2202                 ap->idle_match = 1;
2203                 ap->ability_match_cfg = 0;
2204                 ap->ability_match_count = 0;
2205                 ap->ability_match = 0;
2206                 ap->ack_match = 0;
2207
2208                 rx_cfg_reg = 0;
2209         }
2210
2211         ap->rxconfig = rx_cfg_reg;
2212         ret = ANEG_OK;
2213
2214         switch(ap->state) {
2215         case ANEG_STATE_UNKNOWN:
2216                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2217                         ap->state = ANEG_STATE_AN_ENABLE;
2218
2219                 /* fallthru */
2220         case ANEG_STATE_AN_ENABLE:
2221                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2222                 if (ap->flags & MR_AN_ENABLE) {
2223                         ap->link_time = 0;
2224                         ap->cur_time = 0;
2225                         ap->ability_match_cfg = 0;
2226                         ap->ability_match_count = 0;
2227                         ap->ability_match = 0;
2228                         ap->idle_match = 0;
2229                         ap->ack_match = 0;
2230
2231                         ap->state = ANEG_STATE_RESTART_INIT;
2232                 } else {
2233                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2234                 }
2235                 break;
2236
2237         case ANEG_STATE_RESTART_INIT:
2238                 ap->link_time = ap->cur_time;
2239                 ap->flags &= ~(MR_NP_LOADED);
2240                 ap->txconfig = 0;
2241                 tw32(MAC_TX_AUTO_NEG, 0);
2242                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2243                 tw32_f(MAC_MODE, tp->mac_mode);
2244                 udelay(40);
2245
2246                 ret = ANEG_TIMER_ENAB;
2247                 ap->state = ANEG_STATE_RESTART;
2248
2249                 /* fallthru */
2250         case ANEG_STATE_RESTART:
2251                 delta = ap->cur_time - ap->link_time;
2252                 if (delta > ANEG_STATE_SETTLE_TIME) {
2253                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2254                 } else {
2255                         ret = ANEG_TIMER_ENAB;
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_DISABLE_LINK_OK:
2260                 ret = ANEG_DONE;
2261                 break;
2262
2263         case ANEG_STATE_ABILITY_DETECT_INIT:
2264                 ap->flags &= ~(MR_TOGGLE_TX);
2265                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2266                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2267                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2268                 tw32_f(MAC_MODE, tp->mac_mode);
2269                 udelay(40);
2270
2271                 ap->state = ANEG_STATE_ABILITY_DETECT;
2272                 break;
2273
2274         case ANEG_STATE_ABILITY_DETECT:
2275                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2276                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2277                 }
2278                 break;
2279
2280         case ANEG_STATE_ACK_DETECT_INIT:
2281                 ap->txconfig |= ANEG_CFG_ACK;
2282                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2283                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2284                 tw32_f(MAC_MODE, tp->mac_mode);
2285                 udelay(40);
2286
2287                 ap->state = ANEG_STATE_ACK_DETECT;
2288
2289                 /* fallthru */
2290         case ANEG_STATE_ACK_DETECT:
2291                 if (ap->ack_match != 0) {
2292                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2293                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2294                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2295                         } else {
2296                                 ap->state = ANEG_STATE_AN_ENABLE;
2297                         }
2298                 } else if (ap->ability_match != 0 &&
2299                            ap->rxconfig == 0) {
2300                         ap->state = ANEG_STATE_AN_ENABLE;
2301                 }
2302                 break;
2303
2304         case ANEG_STATE_COMPLETE_ACK_INIT:
2305                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2306                         ret = ANEG_FAILED;
2307                         break;
2308                 }
2309                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2310                                MR_LP_ADV_HALF_DUPLEX |
2311                                MR_LP_ADV_SYM_PAUSE |
2312                                MR_LP_ADV_ASYM_PAUSE |
2313                                MR_LP_ADV_REMOTE_FAULT1 |
2314                                MR_LP_ADV_REMOTE_FAULT2 |
2315                                MR_LP_ADV_NEXT_PAGE |
2316                                MR_TOGGLE_RX |
2317                                MR_NP_RX);
2318                 if (ap->rxconfig & ANEG_CFG_FD)
2319                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2320                 if (ap->rxconfig & ANEG_CFG_HD)
2321                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2322                 if (ap->rxconfig & ANEG_CFG_PS1)
2323                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2324                 if (ap->rxconfig & ANEG_CFG_PS2)
2325                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2326                 if (ap->rxconfig & ANEG_CFG_RF1)
2327                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2328                 if (ap->rxconfig & ANEG_CFG_RF2)
2329                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2330                 if (ap->rxconfig & ANEG_CFG_NP)
2331                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2332
2333                 ap->link_time = ap->cur_time;
2334
2335                 ap->flags ^= (MR_TOGGLE_TX);
2336                 if (ap->rxconfig & 0x0008)
2337                         ap->flags |= MR_TOGGLE_RX;
2338                 if (ap->rxconfig & ANEG_CFG_NP)
2339                         ap->flags |= MR_NP_RX;
2340                 ap->flags |= MR_PAGE_RX;
2341
2342                 ap->state = ANEG_STATE_COMPLETE_ACK;
2343                 ret = ANEG_TIMER_ENAB;
2344                 break;
2345
2346         case ANEG_STATE_COMPLETE_ACK:
2347                 if (ap->ability_match != 0 &&
2348                     ap->rxconfig == 0) {
2349                         ap->state = ANEG_STATE_AN_ENABLE;
2350                         break;
2351                 }
2352                 delta = ap->cur_time - ap->link_time;
2353                 if (delta > ANEG_STATE_SETTLE_TIME) {
2354                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2355                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2356                         } else {
2357                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2358                                     !(ap->flags & MR_NP_RX)) {
2359                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2360                                 } else {
2361                                         ret = ANEG_FAILED;
2362                                 }
2363                         }
2364                 }
2365                 break;
2366
2367         case ANEG_STATE_IDLE_DETECT_INIT:
2368                 ap->link_time = ap->cur_time;
2369                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2370                 tw32_f(MAC_MODE, tp->mac_mode);
2371                 udelay(40);
2372
2373                 ap->state = ANEG_STATE_IDLE_DETECT;
2374                 ret = ANEG_TIMER_ENAB;
2375                 break;
2376
2377         case ANEG_STATE_IDLE_DETECT:
2378                 if (ap->ability_match != 0 &&
2379                     ap->rxconfig == 0) {
2380                         ap->state = ANEG_STATE_AN_ENABLE;
2381                         break;
2382                 }
2383                 delta = ap->cur_time - ap->link_time;
2384                 if (delta > ANEG_STATE_SETTLE_TIME) {
2385                         /* XXX another gem from the Broadcom driver :( */
2386                         ap->state = ANEG_STATE_LINK_OK;
2387                 }
2388                 break;
2389
2390         case ANEG_STATE_LINK_OK:
2391                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2392                 ret = ANEG_DONE;
2393                 break;
2394
2395         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2396                 /* ??? unimplemented */
2397                 break;
2398
2399         case ANEG_STATE_NEXT_PAGE_WAIT:
2400                 /* ??? unimplemented */
2401                 break;
2402
2403         default:
2404                 ret = ANEG_FAILED;
2405                 break;
2406         };
2407
2408         return ret;
2409 }
2410
2411 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2412 {
2413         int res = 0;
2414         struct tg3_fiber_aneginfo aninfo;
2415         int status = ANEG_FAILED;
2416         unsigned int tick;
2417         u32 tmp;
2418
2419         tw32_f(MAC_TX_AUTO_NEG, 0);
2420
2421         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2422         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2423         udelay(40);
2424
2425         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2426         udelay(40);
2427
2428         memset(&aninfo, 0, sizeof(aninfo));
2429         aninfo.flags |= MR_AN_ENABLE;
2430         aninfo.state = ANEG_STATE_UNKNOWN;
2431         aninfo.cur_time = 0;
2432         tick = 0;
2433         while (++tick < 195000) {
2434                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2435                 if (status == ANEG_DONE || status == ANEG_FAILED)
2436                         break;
2437
2438                 udelay(1);
2439         }
2440
2441         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2442         tw32_f(MAC_MODE, tp->mac_mode);
2443         udelay(40);
2444
2445         *flags = aninfo.flags;
2446
2447         if (status == ANEG_DONE &&
2448             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2449                              MR_LP_ADV_FULL_DUPLEX)))
2450                 res = 1;
2451
2452         return res;
2453 }
2454
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 * The register numbers and values below are vendor-specific magic
 * (undocumented BCM8002 registers) — do not reorder or retime them.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2504
2505 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2506 {
2507         u32 sg_dig_ctrl, sg_dig_status;
2508         u32 serdes_cfg, expected_sg_dig_ctrl;
2509         int workaround, port_a;
2510         int current_link_up;
2511
2512         serdes_cfg = 0;
2513         expected_sg_dig_ctrl = 0;
2514         workaround = 0;
2515         port_a = 1;
2516         current_link_up = 0;
2517
2518         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2519             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2520                 workaround = 1;
2521                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2522                         port_a = 0;
2523
2524                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2525                 /* preserve bits 20-23 for voltage regulator */
2526                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2527         }
2528
2529         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2530
2531         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2532                 if (sg_dig_ctrl & (1 << 31)) {
2533                         if (workaround) {
2534                                 u32 val = serdes_cfg;
2535
2536                                 if (port_a)
2537                                         val |= 0xc010000;
2538                                 else
2539                                         val |= 0x4010000;
2540                                 tw32_f(MAC_SERDES_CFG, val);
2541                         }
2542                         tw32_f(SG_DIG_CTRL, 0x01388400);
2543                 }
2544                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2545                         tg3_setup_flow_control(tp, 0, 0);
2546                         current_link_up = 1;
2547                 }
2548                 goto out;
2549         }
2550
2551         /* Want auto-negotiation.  */
2552         expected_sg_dig_ctrl = 0x81388400;
2553
2554         /* Pause capability */
2555         expected_sg_dig_ctrl |= (1 << 11);
2556
2557         /* Asymettric pause */
2558         expected_sg_dig_ctrl |= (1 << 12);
2559
2560         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2561                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2562                     tp->serdes_counter &&
2563                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2564                                     MAC_STATUS_RCVD_CFG)) ==
2565                      MAC_STATUS_PCS_SYNCED)) {
2566                         tp->serdes_counter--;
2567                         current_link_up = 1;
2568                         goto out;
2569                 }
2570 restart_autoneg:
2571                 if (workaround)
2572                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2573                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2574                 udelay(5);
2575                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2576
2577                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2578                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2579         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2580                                  MAC_STATUS_SIGNAL_DET)) {
2581                 sg_dig_status = tr32(SG_DIG_STATUS);
2582                 mac_status = tr32(MAC_STATUS);
2583
2584                 if ((sg_dig_status & (1 << 1)) &&
2585                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2586                         u32 local_adv, remote_adv;
2587
2588                         local_adv = ADVERTISE_PAUSE_CAP;
2589                         remote_adv = 0;
2590                         if (sg_dig_status & (1 << 19))
2591                                 remote_adv |= LPA_PAUSE_CAP;
2592                         if (sg_dig_status & (1 << 20))
2593                                 remote_adv |= LPA_PAUSE_ASYM;
2594
2595                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2596                         current_link_up = 1;
2597                         tp->serdes_counter = 0;
2598                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2599                 } else if (!(sg_dig_status & (1 << 1))) {
2600                         if (tp->serdes_counter)
2601                                 tp->serdes_counter--;
2602                         else {
2603                                 if (workaround) {
2604                                         u32 val = serdes_cfg;
2605
2606                                         if (port_a)
2607                                                 val |= 0xc010000;
2608                                         else
2609                                                 val |= 0x4010000;
2610
2611                                         tw32_f(MAC_SERDES_CFG, val);
2612                                 }
2613
2614                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2615                                 udelay(40);
2616
2617                                 /* Link parallel detection - link is up */
2618                                 /* only if we have PCS_SYNC and not */
2619                                 /* receiving config code words */
2620                                 mac_status = tr32(MAC_STATUS);
2621                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2622                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2623                                         tg3_setup_flow_control(tp, 0, 0);
2624                                         current_link_up = 1;
2625                                         tp->tg3_flags2 |=
2626                                                 TG3_FLG2_PARALLEL_DETECT;
2627                                         tp->serdes_counter =
2628                                                 SERDES_PARALLEL_DET_TIMEOUT;
2629                                 } else
2630                                         goto restart_autoneg;
2631                         }
2632                 }
2633         } else {
2634                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2635                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2636         }
2637
2638 out:
2639         return current_link_up;
2640 }
2641
/* Bring up the fiber link without the hardware SG_DIG engine: run the
 * software autoneg state machine, or force 1000FD when autoneg is
 * disabled.  Returns 1 if the link should be considered up, else 0.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no link to negotiate. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* We always advertise symmetric pause; translate
			 * the partner's MR_* flags into MII LPA bits for
			 * flow control resolution.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events and wait (bounded) for them
		 * to stop being re-asserted.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg produced nothing, but PCS is synced and no config
		 * code words arrive: link is up by parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS so the partner can sync to us. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2698
/* Top-level link setup for TBI (fiber) devices.  Chooses hardware
 * (SG_DIG) or by-hand autoneg, resolves link state, programs the LED
 * control, and updates the net_device carrier.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we can report only real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, fully
	 * initialized, and the MAC reports a clean synced link with no
	 * incoming config words — just ack the change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-changed bit in the status block so the next
	 * real transition is noticed.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack any pending change events until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out: pulse SEND_CONFIGS to provoke the
		 * link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier state and report link/flow-control changes. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2810
/* Link setup for SerDes devices that are controlled through MII
 * registers (e.g. 5714S class parts).  Handles autoneg, forced mode,
 * and duplex/flow-control resolution.  Returns accumulated PHY access
 * error status (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down: read twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714: the MII link bit is unreliable; trust MAC_TX_STATUS. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from scratch. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Advertisement changed or autoneg was off: restart
		 * autoneg and return; link will be evaluated on the next
		 * pass once the AN timeout counter expires.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg, set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Strip the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Double read: BMSR latches link-down. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/pause from the negotiated
			 * common ability set.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	/* NOTE(review): this tests the PREVIOUS active_duplex — the new
	 * value (current_duplex) is only assigned below.  Looks like a
	 * latent one-cycle lag in MAC half-duplex mode; confirm intent.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate carrier state and report transitions. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2977
/* Poll-time helper for MII SerDes: when autoneg stalls but we see
 * signal and no config code words, force the link up by parallel
 * detection; when config words reappear, hand control back to autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			/* Double read: the register latches; second read
			 * returns current status.
			 */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3035
/* Bring up / reconfigure the link for whatever PHY type this board has,
 * then apply link-state-dependent MAC settings.
 *
 * @tp: driver private state (caller holds tp->lock)
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * Returns 0 on success or the error code from the PHY setup helper.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
        int err;

        /* Dispatch to the setup routine matching the PHY attachment. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                err = tg3_setup_fiber_phy(tp, force_reset);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                err = tg3_setup_fiber_mii_phy(tp, force_reset);
        } else {
                err = tg3_setup_copper_phy(tp, force_reset);
        }

        /* 1000/half needs an extended slot time (0xff) in the MAC TX
         * timing register; every other speed/duplex uses the default (32).
         */
        if (tp->link_config.active_speed == SPEED_1000 &&
            tp->link_config.active_duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* On pre-5705 chips, disable statistics block coalescing while
         * the link is down (tick value 0), re-enabling it on carrier.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                if (netif_carrier_ok(tp->dev)) {
                        tw32(HOSTCC_STAT_COAL_TICKS,
                             tp->coal.stats_block_coalesce_usecs);
                } else {
                        tw32(HOSTCC_STAT_COAL_TICKS, 0);
                }
        }

        /* ASPM workaround: with link up, force the L1 entry threshold to
         * its maximum; with link down, restore the tuned pwrmgmt_thresh.
         */
        if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
                u32 val = tr32(PCIE_PWR_MGMT_THRESH);
                if (!netif_carrier_ok(tp->dev))
                        val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
                              tp->pwrmgmt_thresh;
                else
                        val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
                tw32(PCIE_PWR_MGMT_THRESH, val);
        }

        return err;
}
3081
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
        /* If the reorder workaround is already active, or we are already
         * using indirect mailbox writes, landing here is a driver bug.
         */
        BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
               tp->write32_tx_mbox == tg3_write_indirect_mbox);

        printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
               "mapped I/O cycles to the network device, attempting to "
               "recover. Please report the problem to the driver maintainer "
               "and include system chipset information.\n", tp->dev->name);

        /* Only flag the condition here; tg3_reset_task() performs the
         * actual chip reset and switches to flushed mailbox writes.
         */
        spin_lock(&tp->lock);
        tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
        spin_unlock(&tp->lock);
}
3102
3103 static inline u32 tg3_tx_avail(struct tg3 *tp)
3104 {
3105         smp_mb();
3106         return (tp->tx_pending -
3107                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3108 }
3109
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
        /* Reap completed TX descriptors between our software consumer
         * index and the hardware's reported consumer index.
         */
        u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tp->tx_cons;

        while (sw_idx != hw_idx) {
                struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* A completed slot with no skb means the hardware and
                 * software views of the ring have diverged (likely MMIO
                 * reordering) -- trigger recovery.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                /* First descriptor maps the linear head of the skb. */
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                sw_idx = NEXT_TX(sw_idx);

                /* Subsequent descriptors map the page fragments.  Only
                 * the first slot of a packet holds the skb pointer, so
                 * finding one set here (or running past hw_idx) is a bug.
                 */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tp->tx_buffers[sw_idx];
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(ri, mapping),
                                       skb_shinfo(skb)->frags[i].size,
                                       PCI_DMA_TODEVICE);

                        sw_idx = NEXT_TX(sw_idx);
                }

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        tp->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under the TX lock to close the race with a concurrent
         * tg3_start_xmit() stopping the queue.
         */
        if (unlikely(netif_queue_stopped(tp->dev) &&
                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
                netif_tx_lock(tp->dev);
                if (netif_queue_stopped(tp->dev) &&
                    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
                        netif_wake_queue(tp->dev);
                netif_tx_unlock(tp->dev);
        }
}
3177
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
                            int src_idx, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map, *src_map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;

        /* Select ring-specific descriptor, bookkeeping entry and buffer
         * size based on which ring (standard or jumbo) is being filled.
         */
        src_map = NULL;
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
                desc = &tp->rx_std[dest_idx];
                map = &tp->rx_std_buffers[dest_idx];
                if (src_idx >= 0)
                        src_map = &tp->rx_std_buffers[src_idx];
                skb_size = tp->rx_pkt_buf_sz;
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
                desc = &tp->rx_jumbo[dest_idx];
                map = &tp->rx_jumbo_buffers[dest_idx];
                if (src_idx >= 0)
                        src_map = &tp->rx_jumbo_buffers[src_idx];
                skb_size = RX_JUMBO_PKT_BUF_SZ;
                break;

        default:
                return -EINVAL;
        };

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        skb = netdev_alloc_skb(tp->dev, skb_size);
        if (skb == NULL)
                return -ENOMEM;

        /* rx_offset aligns the IP header (typically 2 bytes). */
        skb_reserve(skb, tp->rx_offset);

        /* NOTE(review): the DMA mapping result is not checked for a
         * mapping error here -- verify this is acceptable on all
         * supported platforms.
         */
        mapping = pci_map_single(tp->pdev, skb->data,
                                 skb_size - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        /* Publish the new DMA address to the chip-visible descriptor. */
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return skb_size;
}
3249
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
                           int src_idx, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        int dest_idx;

        /* Locate source and destination descriptors/bookkeeping in the
         * ring named by opaque_key.
         */
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
                dest_desc = &tp->rx_std[dest_idx];
                dest_map = &tp->rx_std_buffers[dest_idx];
                src_desc = &tp->rx_std[src_idx];
                src_map = &tp->rx_std_buffers[src_idx];
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
                dest_desc = &tp->rx_jumbo[dest_idx];
                dest_map = &tp->rx_jumbo_buffers[dest_idx];
                src_desc = &tp->rx_jumbo[src_idx];
                src_map = &tp->rx_jumbo_buffers[src_idx];
                break;

        default:
                return;
        };

        /* Move the skb, its DMA mapping, and the chip-visible address
         * from the source slot to the destination slot, then clear the
         * source so the buffer is owned by exactly one slot.
         */
        dest_map->skb = src_map->skb;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;

        src_map->skb = NULL;
}
3290
#if TG3_VLAN_TAG_USED
/* Deliver a received frame carrying a hardware-stripped VLAN tag via
 * the VLAN acceleration receive path.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
        return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3297
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
        u32 work_mask, rx_std_posted = 0;
        u32 sw_idx = tp->rx_rcb_ptr;
        u16 hw_idx;
        int received;

        hw_idx = tp->hw_status->idx[0].rx_producer;
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        /* Process status ring entries until we catch up with the
         * hardware producer or exhaust the NAPI budget.
         */
        while (sw_idx != hw_idx && budget > 0) {
                struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie identifies which post ring and slot
                 * the buffer came from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_std_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_std_ptr;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
                                                  mapping);
                        skb = tp->rx_jumbo_buffers[desc_idx].skb;
                        post_ptr = &tp->rx_jumbo_ptr;
                }
                else {
                        /* Unrecognized cookie: skip without reposting. */
                        goto next_pkt_nopost;
                }

                work_mask |= opaque_key;

                /* Drop frames with receive errors (except the benign
                 * odd-nibble MII indication), recycling the buffer.
                 */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->net_stats.rx_dropped++;
                        goto next_pkt;
                }

                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

                /* Large frames: hand the DMA buffer to the stack directly
                 * and post a freshly allocated replacement.
                 */
                if (len > RX_COPY_THRESHOLD
                        && tp->rx_offset == 2
                        /* rx_offset != 2 iff this is a 5701 card running
                         * in PCI-X mode [see tg3_get_invariants()] */
                ) {
                        int skb_size;

                        skb_size = tg3_alloc_rx_skb(tp, opaque_key,
                                                    desc_idx, *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr,
                                         skb_size - tp->rx_offset,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(skb, len);
                } else {
                        /* Small frames: copy into a new skb and recycle
                         * the original DMA buffer back onto the ring.
                         */
                        struct sk_buff *copy_skb;

                        tg3_recycle_rx(tp, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Accept the hardware checksum only when enabled and the
                 * chip reports a full ones-complement match (0xffff).
                 */
                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
                if (tp->vlgrp != NULL &&
                    desc->type_flags & RXD_FLAG_VLAN) {
                        tg3_vlan_rx(tp, skb,
                                    desc->err_vlan & RXD_VLAN_MASK);
                } else
#endif
                        netif_receive_skb(skb);

                tp->dev->last_rx = jiffies;
                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically tell the chip about newly posted standard
                 * ring buffers so it does not run dry mid-loop.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;

                        tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
                                     TG3_64BIT_REG_LOW, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = tp->hw_status->idx[0].rx_producer;
                        rmb();
                }
        }

        /* ACK the status ring. */
        tp->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

        /* Refill RX ring(s). */
        if (work_mask & RXD_OPAQUE_RING_STD) {
                sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
                tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
                             sw_idx);
        }
        /* Ensure the mailbox writes are ordered before the IRQ handler
         * can run again on another CPU.
         */
        mmiowb();

        return received;
}
3477
/* NAPI poll callback: handle link-change events, reap TX completions,
 * process up to @budget RX packets, then acknowledge the status block
 * and re-enable interrupts when all work is done.
 *
 * Returns the number of RX packets processed.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3 *tp = container_of(napi, struct tg3, napi);
        struct net_device *netdev = tp->dev;
        struct tg3_hw_status *sblk = tp->hw_status;
        int work_done = 0;

        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit before re-running
                         * PHY setup under the device lock.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }

        /* run TX completion thread */
        if (sblk->idx[0].tx_consumer != tp->tx_cons) {
                tg3_tx(tp);
                /* tg3_tx() may have flagged a TX recovery; bail out and
                 * let the reset task rebuild the device.
                 */
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
                        netif_rx_complete(netdev, napi);
                        schedule_work(&tp->reset_task);
                        return 0;
                }
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_done = tg3_rx(tp, budget);

        /* Record the status tag (tagged mode) or clear the updated bit
         * so tg3_has_work() sees a consistent snapshot.
         */
        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
                tp->last_tag = sblk->status_tag;
                rmb();
        } else
                sblk->status &= ~SD_STATUS_UPDATED;

        /* if no more work, tell net stack and NIC we're done */
        if (!tg3_has_work(tp)) {
                netif_rx_complete(netdev, napi);
                tg3_restart_ints(tp);
        }

        return work_done;
}
3529
/* Mark the device as IRQ-synchronized and wait for any in-flight
 * interrupt handler to finish.  Handlers check tp->irq_sync (via
 * tg3_irq_sync()) and become no-ops once it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        /* Must not already be quiesced. */
        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make the flag visible before waiting on the handler. */
        smp_mb();

        synchronize_irq(tp->pdev->irq);
}
3539
/* Nonzero when interrupts are being quiesced; IRQ handlers use this to
 * skip scheduling NAPI work during a shutdown/reconfiguration.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
        return tp->irq_sync;
}
3544
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        /* _bh variant: also excludes the NAPI softirq path. */
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
3556
/* Release the lock taken by tg3_full_lock().  Note this does not undo
 * an IRQ quiesce; tp->irq_sync is cleared elsewhere.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
3561
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the poll handler will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

        /* Skip scheduling if interrupts are being quiesced. */
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_HANDLED;
}
3578
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);

        /* Warm the cache lines the poll handler will touch first. */
        prefetch(tp->hw_status);
        prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                netif_rx_schedule(dev, &tp->napi);

        return IRQ_RETVAL(1);
}
3603
/* Legacy INTx interrupt handler (shared-line capable).  Confirms the
 * interrupt belongs to this device, masks further chip interrupts, and
 * hands the work to NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not ours (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tp))) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                netif_rx_schedule(dev, &tp->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
3652
/* INTx interrupt handler for chips using tagged status blocks.  A new
 * status_tag (different from tp->last_tag) indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not ours (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
3700
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;

        /* Claim the interrupt if the status block was updated or the
         * chip still asserts the line, then mask further interrupts so
         * the test sees exactly one delivery.
         */
        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
3715
3716 static int tg3_init_hw(struct tg3 *, int);
3717 static int tg3_halt(struct tg3 *, int, int);
3718
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                /* Re-init failed: halt the chip and close the device.
                 * dev_close() must run without tp->lock held, so drop
                 * and re-take it around the teardown.
                 */
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
3740
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so the device can
 * be serviced with normal interrupt delivery unavailable (e.g. netconsole).
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3749
/* Workqueue handler that fully resets and re-initializes the chip.
 * Scheduled from TX timeout and TX-recovery paths; runs in process
 * context so it may sleep.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* Device may have been closed between scheduling and execution. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        /* tg3_netif_stop() must be called without the lock held. */
        tg3_full_unlock(tp);

        tg3_netif_stop(tp);

        /* Re-take the lock with full IRQ quiescing for the reset. */
        tg3_full_lock(tp, 1);

        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        /* If TX recovery was requested, switch to flushed mailbox/register
         * writes to defeat chipset MMIO write reordering.
         */
        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
3790
3791 static void tg3_dump_short_state(struct tg3 *tp)
3792 {
3793         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3794                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3795         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3796                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3797 }
3798
/* Network stack TX watchdog callback: log the event (with a short
 * register dump when tx_err messages are enabled) and schedule a full
 * chip reset from the workqueue.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
                       dev->name);
                tg3_dump_short_state(tp);
        }

        /* Actual recovery happens in process context. */
        schedule_work(&tp->reset_task);
}
3811
3812 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3813 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3814 {
3815         u32 base = (u32) mapping & 0xffffffff;
3816
3817         return ((base > 0xffffdcc0) &&
3818                 (base + len + 8 < base));
3819 }
3820
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
/* Only relevant on 64-bit highmem configs where DMA addresses can
 * actually exceed 40 bits; elsewhere this compiles to a constant 0.
 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Only chips with the 40-bit DMA erratum need the check. */
        if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
                return (((u64) mapping + len) > DMA_40BIT_MASK);
        return 0;
#else
        return 0;
#endif
}
3833
3834 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3835
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy the offending skb into a freshly allocated linear buffer, point
 * the descriptor at *start at the copy, and unmap/clear the software
 * ring entries from *start up to (but not including) @last_plus_one.
 * The original skb is always freed.  Returns 0 on success, -1 if the
 * copy could not be allocated or its mapping also crosses a 4GB
 * boundary (in which case the packet is silently dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* One descriptor covers the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; entries 1..n held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First entry now owns the replacement skb/mapping
			 * (new_skb is NULL on the failure paths above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3893
3894 static void tg3_set_txd(struct tg3 *tp, int entry,
3895                         dma_addr_t mapping, int len, u32 flags,
3896                         u32 mss_and_is_end)
3897 {
3898         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3899         int is_end = (mss_and_is_end & 0x1);
3900         u32 mss = (mss_and_is_end >> 1);
3901         u32 vlan_tag = 0;
3902
3903         if (is_end)
3904                 flags |= TXD_FLAG_END;
3905         if (flags & TXD_FLAG_VLAN) {
3906                 vlan_tag = flags >> 16;
3907                 flags &= 0xffff;
3908         }
3909         vlan_tag |= (mss << TXD_MSS_SHIFT);
3910
3911         txd->addr_hi = ((u64) mapping >> 32);
3912         txd->addr_lo = ((u64) mapping & 0xffffffff);
3913         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3914         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3915 }
3916
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and each page fragment for DMA, writes one TX
 * descriptor per piece, then tells the chip via the producer mailbox.
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Headers are modified below (checksum/tot_len cleared),
		 * so make sure they are not shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Hardware TSO: the header length is passed to the chip
		 * in the upper bits of the MSS field (<< 9).
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* The chip recomputes the IP checksum and total
			 * length per segment; seed tot_len for segment size.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware fills in the TCP checksum for each segment. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* Only the first descriptor of a packet keeps the skb pointer;
	 * tx reclaim uses it to know where the packet starts.
	 */
	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* END flag (i == last) only on the final fragment. */
			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, then re-check in case
		 * tx reclaim freed space after the stop (avoids a stall).
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4035
4036 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4037
4038 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4039  * TSO header is greater than 80 bytes.
4040  */
4041 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4042 {
4043         struct sk_buff *segs, *nskb;
4044
4045         /* Estimate the number of fragments in the worst case */
4046         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4047                 netif_stop_queue(tp->dev);
4048                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4049                         return NETDEV_TX_BUSY;
4050
4051                 netif_wake_queue(tp->dev);
4052         }
4053
4054         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4055         if (unlikely(IS_ERR(segs)))
4056                 goto tg3_tso_bug_end;
4057
4058         do {
4059                 nskb = segs;
4060                 segs = segs->next;
4061                 nskb->next = NULL;
4062                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4063         } while (segs);
4064
4065 tg3_tso_bug_end:
4066         dev_kfree_skb(skb);
4067
4068         return NETDEV_TX_OK;
4069 }
4070
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit(), but every mapping is checked against the 4GB
 * and 40-bit DMA errata; offending packets are re-copied through
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY when the ring is full.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are modified below; unclone them if shared. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* TSO headers longer than 80 bytes trip another chip bug;
		 * fall back to software GSO segmentation in that case.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* Hardware TSO computes the TCP checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO needs a pseudo-header checksum seed. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths; the bit position differs
		 * between HW-TSO/5705 (mss bits) and other chips (flags).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Check the head mapping against the 4GB-crossing erratum. */
	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Each fragment mapping gets the same erratum checks. */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop, then re-check in case reclaim
		 * freed space after the stop (avoids a queue stall).
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4244
4245 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4246                                int new_mtu)
4247 {
4248         dev->mtu = new_mtu;
4249
4250         if (new_mtu > ETH_DATA_LEN) {
4251                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4252                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4253                         ethtool_op_set_tso(dev, 0);
4254                 }
4255                 else
4256                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4257         } else {
4258                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4259                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4260                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4261         }
4262 }
4263
4264 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4265 {
4266         struct tg3 *tp = netdev_priv(dev);
4267         int err;
4268
4269         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4270                 return -EINVAL;
4271
4272         if (!netif_running(dev)) {
4273                 /* We'll just catch it later when the
4274                  * device is up'd.
4275                  */
4276                 tg3_set_mtu(dev, tp, new_mtu);
4277                 return 0;
4278         }
4279
4280         tg3_netif_stop(tp);
4281
4282         tg3_full_lock(tp, 1);
4283
4284         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4285
4286         tg3_set_mtu(dev, tp, new_mtu);
4287
4288         err = tg3_restart_hw(tp, 0);
4289
4290         if (!err)
4291                 tg3_netif_start(tp);
4292
4293         tg3_full_unlock(tp);
4294
4295         return err;
4296 }
4297
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same treatment with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: a packet occupies one entry for the skb head plus one
	 * per fragment; only the first entry holds the skb pointer, so i
	 * is advanced inside the loop across the whole packet.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Fragment entries may wrap past the end of the ring. */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4369
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one RX buffer could be
 * allocated.  A partially filled ring is tolerated: rx_pending /
 * rx_jumbo_pending are shrunk to the number actually allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use bigger std-ring buffers instead of a
	 * jumbo ring when running a jumbo MTU.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4459
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up; safe to call on
 * a partially initialized tp (each pointer is checked and NULLed).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers live inside this one
	 * allocation (see tg3_alloc_consistent), so a single kfree
	 * covers all three ring-info arrays.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4499
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-info arrays plus all DMA-coherent ring,
 * status and statistics blocks.  Returns 0 or -ENOMEM; on failure
 * everything already allocated is released via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One zeroed allocation holds, in order: std RX ring info,
	 * jumbo RX ring info, TX ring info.  The two extra pointers
	 * below carve it up.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* Status and stats blocks are read by the chip; start clean. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4561
4562 #define MAX_WAIT_CNT 1000
4563
4564 /* To stop a block, clear the enable bit and poll till it
4565  * clears.  tp->lock is held.
4566  */
4567 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4568 {
4569         unsigned int i;
4570         u32 val;
4571
4572         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4573                 switch (ofs) {
4574                 case RCVLSC_MODE:
4575                 case DMAC_MODE:
4576                 case MBFREE_MODE:
4577                 case BUFMGR_MODE:
4578                 case MEMARB_MODE:
4579                         /* We can't enable/disable these bits of the
4580                          * 5705/5750, just say success.
4581                          */
4582                         return 0;
4583
4584                 default:
4585                         break;
4586                 };
4587         }
4588
4589         val = tr32(ofs);
4590         val &= ~enable_bit;
4591         tw32_f(ofs, val);
4592
4593         for (i = 0; i < MAX_WAIT_CNT; i++) {
4594                 udelay(100);
4595                 val = tr32(ofs);
4596                 if ((val & enable_bit) == 0)
4597                         break;
4598         }
4599
4600         if (i == MAX_WAIT_CNT && !silent) {
4601                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4602                        "ofs=%lx enable_bit=%x\n",
4603                        ofs, enable_bit);
4604                 return -ENODEV;
4605         }
4606
4607         return 0;
4608 }
4609
/* Quiesce the hardware: stop the DMA/MAC/buffer-manager blocks in
 * dependency order so the chip stops touching host memory, then clear
 * the status block and statistics.  Returns 0 or an OR of negative
 * errnos from blocks that failed to stop.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting frames from the wire first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Chip is now idle; wipe the status block and stats. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4672
4673 /* tp->lock is held. */
4674 static int tg3_nvram_lock(struct tg3 *tp)
4675 {
4676         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4677                 int i;
4678
4679                 if (tp->nvram_lock_cnt == 0) {
4680                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4681                         for (i = 0; i < 8000; i++) {
4682                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4683                                         break;
4684                                 udelay(20);
4685                         }
4686                         if (i == 8000) {
4687                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4688                                 return -ENODEV;
4689                         }
4690                 }
4691                 tp->nvram_lock_cnt++;
4692         }
4693         return 0;
4694 }
4695
4696 /* tp->lock is held. */
4697 static void tg3_nvram_unlock(struct tg3 *tp)
4698 {
4699         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4700                 if (tp->nvram_lock_cnt > 0)
4701                         tp->nvram_lock_cnt--;
4702                 if (tp->nvram_lock_cnt == 0)
4703                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4704         }
4705 }
4706
4707 /* tp->lock is held. */
4708 static void tg3_enable_nvram_access(struct tg3 *tp)
4709 {
4710         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4711             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4712                 u32 nvaccess = tr32(NVRAM_ACCESS);
4713
4714                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4715         }
4716 }
4717
4718 /* tp->lock is held. */
4719 static void tg3_disable_nvram_access(struct tg3 *tp)
4720 {
4721         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4722             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4723                 u32 nvaccess = tr32(NVRAM_ACCESS);
4724
4725                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4726         }
4727 }
4728
4729 /* tp->lock is held. */
4730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4731 {
4732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4734
4735         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4736                 switch (kind) {
4737                 case RESET_KIND_INIT:
4738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4739                                       DRV_STATE_START);
4740                         break;
4741
4742                 case RESET_KIND_SHUTDOWN:
4743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4744                                       DRV_STATE_UNLOAD);
4745                         break;
4746
4747                 case RESET_KIND_SUSPEND:
4748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4749                                       DRV_STATE_SUSPEND);
4750                         break;
4751
4752                 default:
4753                         break;
4754                 };
4755         }
4756 }
4757
4758 /* tp->lock is held. */
4759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4760 {
4761         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4762                 switch (kind) {
4763                 case RESET_KIND_INIT:
4764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4765                                       DRV_STATE_START_DONE);
4766                         break;
4767
4768                 case RESET_KIND_SHUTDOWN:
4769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4770                                       DRV_STATE_UNLOAD_DONE);
4771                         break;
4772
4773                 default:
4774                         break;
4775                 };
4776         }
4777 }
4778
4779 /* tp->lock is held. */
4780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4781 {
4782         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4783                 switch (kind) {
4784                 case RESET_KIND_INIT:
4785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4786                                       DRV_STATE_START);
4787                         break;
4788
4789                 case RESET_KIND_SHUTDOWN:
4790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4791                                       DRV_STATE_UNLOAD);
4792                         break;
4793
4794                 case RESET_KIND_SUSPEND:
4795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4796                                       DRV_STATE_SUSPEND);
4797                         break;
4798
4799                 default:
4800                         break;
4801                 };
4802         }
4803 }
4804
4805 static int tg3_poll_fw(struct tg3 *tp)
4806 {
4807         int i;
4808         u32 val;
4809
4810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4811                 /* Wait up to 20ms for init done. */
4812                 for (i = 0; i < 200; i++) {
4813                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4814                                 return 0;
4815                         udelay(100);
4816                 }
4817                 return -ENODEV;
4818         }
4819
4820         /* Wait for firmware initialization to complete. */
4821         for (i = 0; i < 100000; i++) {
4822                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4823                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4824                         break;
4825                 udelay(10);
4826         }
4827
4828         /* Chip might not be fitted with firmware.  Some Sun onboard
4829          * parts are configured like that.  So don't signal the timeout
4830          * of the above loop as an error, but do report the lack of
4831          * running firmware once.
4832          */
4833         if (i >= 100000 &&
4834             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4835                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4836
4837                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4838                        tp->dev->name);
4839         }
4840
4841         return 0;
4842 }
4843
4844 /* Save PCI command register before chip reset */
4845 static void tg3_save_pci_state(struct tg3 *tp)
4846 {
4847         u32 val;
4848
4849         pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
4850         tp->pci_cmd = val;
4851 }
4852
/* Restore the PCI state that a chip reset clobbers (counterpart of
 * tg3_save_pci_state): indirect access, the PCI command register,
 * PCI-X relaxed ordering, and MSI enable on 5780-class parts.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved before the reset. */
	pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
4901
4902 static void tg3_stop_fw(struct tg3 *);
4903
/* Perform a GRC core-clock reset of the chip, preserving PCI state
 * across it, then bring the memory arbiter and MAC back up, wait for
 * firmware and re-probe the ASF configuration.  tp->lock is held.
 * Returns 0 or a negative errno from the firmware poll.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express pre-reset tweaks.  NOTE(review): register
		 * 0x7e2c and bit 29 semantics are undocumented here;
		 * values presumably mirror vendor bootcode — do not change.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and un-halt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter; 5780-class parts keep the
	 * other mode bits the reset left behind.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode appropriate for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5091
5092 /* tp->lock is held. */
5093 static void tg3_stop_fw(struct tg3 *tp)
5094 {
5095         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5096                 u32 val;
5097                 int i;
5098
5099                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5100                 val = tr32(GRC_RX_CPU_EVENT);
5101                 val |= (1 << 14);
5102                 tw32(GRC_RX_CPU_EVENT, val);
5103
5104                 /* Wait for RX cpu to ACK the event.  */
5105                 for (i = 0; i < 100; i++) {
5106                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5107                                 break;
5108                         udelay(1);
5109                 }
5110         }
5111 }
5112
/* Fully halt the device: pause ASF firmware, signal the reset kind to
 * the firmware mailboxes, quiesce the hardware and reset the chip.
 * tp->lock is held.  Returns 0 or a negative errno from the chip
 * reset (tg3_abort_hw failures are intentionally not propagated).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* Collapse the redundant "if (err) return err; return 0;". */
	return err;
}
5133
5134 #define TG3_FW_RELEASE_MAJOR    0x0
5135 #define TG3_FW_RELASE_MINOR     0x0
5136 #define TG3_FW_RELEASE_FIX      0x0
5137 #define TG3_FW_START_ADDR       0x08000000
5138 #define TG3_FW_TEXT_ADDR        0x08000000
5139 #define TG3_FW_TEXT_LEN         0x9c0
5140 #define TG3_FW_RODATA_ADDR      0x080009c0
5141 #define TG3_FW_RODATA_LEN       0x60
5142 #define TG3_FW_DATA_ADDR        0x08000a40
5143 #define TG3_FW_DATA_LEN         0x20
5144 #define TG3_FW_SBSS_ADDR        0x08000a60
5145 #define TG3_FW_SBSS_LEN         0xc
5146 #define TG3_FW_BSS_ADDR         0x08000a70
5147 #define TG3_FW_BSS_LEN          0x10
5148
/* Firmware text segment image, loaded by tg3_load_firmware_cpu at
 * TG3_FW_TEXT_ADDR (raw instruction words — do not edit by hand).
 * See the firmware copyright notice at the top of this file.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5242
/* Firmware read-only data segment, loaded at TG3_FW_RODATA_ADDR.
 * Contains packed ASCII label strings referenced by tg3FwText.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5250
/* Firmware data segment: compiled out because it is all zeros; the
 * loader writes zeros when the corresponding fw_info pointer is NULL.
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5257
5258 #define RX_CPU_SCRATCH_BASE     0x30000
5259 #define RX_CPU_SCRATCH_SIZE     0x04000
5260 #define TX_CPU_SCRATCH_BASE     0x34000
5261 #define TX_CPU_SCRATCH_SIZE     0x04000
5262
/* Halt the on-chip CPU at `offset` (RX_CPU_BASE or TX_CPU_BASE).
 * tp->lock is held.  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts must never be asked to halt the TX CPU (see the
	 * matching check in tg3_load_firmware_cpu).
	 */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 uses the VCPU; halt it via the GRC control reg. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU only: issue one final flushed halt request. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5310
/* Describes one firmware image for tg3_load_firmware_cpu: load
 * address, length and payload for the text, rodata and data segments.
 * A NULL *_data pointer makes the loader write zeros for that segment.
 */
struct fw_info {
	unsigned int text_base;		/* text segment load address */
	unsigned int text_len;		/* text length in bytes */
	const u32 *text_data;		/* text words, or NULL for zeros */
	unsigned int rodata_base;	/* rodata segment load address */
	unsigned int rodata_len;	/* rodata length in bytes */
	const u32 *rodata_data;		/* rodata words, or NULL for zeros */
	unsigned int data_base;		/* data segment load address */
	unsigned int data_len;		/* data length in bytes */
	const u32 *data_data;		/* data words, or NULL for zeros */
};
5322
5323 /* tp->lock is held. */
5324 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5325                                  int cpu_scratch_size, struct fw_info *info)
5326 {
5327         int err, lock_err, i;
5328         void (*write_op)(struct tg3 *, u32, u32);
5329
5330         if (cpu_base == TX_CPU_BASE &&
5331             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5332                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5333                        "TX cpu firmware on %s which is 5705.\n",
5334                        tp->dev->name);
5335                 return -EINVAL;
5336         }
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5339                 write_op = tg3_write_mem;
5340         else
5341                 write_op = tg3_write_indirect_reg32;
5342
5343         /* It is possible that bootcode is still loading at this point.
5344          * Get the nvram lock first before halting the cpu.
5345          */
5346         lock_err = tg3_nvram_lock(tp);
5347         err = tg3_halt_cpu(tp, cpu_base);
5348         if (!lock_err)
5349                 tg3_nvram_unlock(tp);
5350         if (err)
5351                 goto out;
5352
5353         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5354                 write_op(tp, cpu_scratch_base + i, 0);
5355         tw32(cpu_base + CPU_STATE, 0xffffffff);
5356         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5357         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5358                 write_op(tp, (cpu_scratch_base +
5359                               (info->text_base & 0xffff) +
5360                               (i * sizeof(u32))),
5361                          (info->text_data ?
5362                           info->text_data[i] : 0));
5363         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5364                 write_op(tp, (cpu_scratch_base +
5365                               (info->rodata_base & 0xffff) +
5366                               (i * sizeof(u32))),
5367                          (info->rodata_data ?
5368                           info->rodata_data[i] : 0));
5369         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5370                 write_op(tp, (cpu_scratch_base +
5371                               (info->data_base & 0xffff) +
5372                               (i * sizeof(u32))),
5373                          (info->data_data ?
5374                           info->data_data[i] : 0));
5375
5376         err = 0;
5377
5378 out:
5379         return err;
5380 }
5381
5382 /* tp->lock is held. */
5383 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5384 {
5385         struct fw_info info;
5386         int err, i;
5387
5388         info.text_base = TG3_FW_TEXT_ADDR;
5389         info.text_len = TG3_FW_TEXT_LEN;
5390         info.text_data = &tg3FwText[0];
5391         info.rodata_base = TG3_FW_RODATA_ADDR;
5392         info.rodata_len = TG3_FW_RODATA_LEN;
5393         info.rodata_data = &tg3FwRodata[0];
5394         info.data_base = TG3_FW_DATA_ADDR;
5395         info.data_len = TG3_FW_DATA_LEN;
5396         info.data_data = NULL;
5397
5398         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5399                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5400                                     &info);
5401         if (err)
5402                 return err;
5403
5404         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5405                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5406                                     &info);
5407         if (err)
5408                 return err;
5409
5410         /* Now startup only the RX cpu. */
5411         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5412         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5413
5414         for (i = 0; i < 5; i++) {
5415                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5416                         break;
5417                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5418                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5419                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5420                 udelay(1000);
5421         }
5422         if (i >= 5) {
5423                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5424                        "to set RX CPU PC, is %08x should be %08x\n",
5425                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5426                        TG3_FW_TEXT_ADDR);
5427                 return -ENODEV;
5428         }
5429         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5430         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5431
5432         return 0;
5433 }
5434
5435
/* TSO firmware image layout.  Addresses are in the on-chip CPU's view;
 * lengths are in bytes and size the tg3TsoFw* arrays below.
 * NOTE(review): "RELASE" in the minor-version macro is a misspelling of
 * "RELEASE" — presumably referenced elsewhere under this name, so it is
 * kept as-is; verify all users before renaming.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5450
5451 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5452         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5453         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5454         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5455         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5456         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5457         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5458         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5459         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5460         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5461         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5462         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5463         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5464         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5465         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5466         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5467         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5468         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5469         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5470         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5471         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5472         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5473         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5474         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5475         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5476         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5477         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5478         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5479         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5480         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5481         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5482         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5483         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5484         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5485         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5486         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5487         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5488         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5489         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5490         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5491         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5492         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5493         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5494         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5495         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5496         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5497         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5498         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5499         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5500         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5501         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5502         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5503         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5504         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5505         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5506         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5507         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5508         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5509         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5510         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5511         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5512         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5513         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5514         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5515         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5516         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5517         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5518         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5519         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5520         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5521         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5522         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5523         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5524         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5525         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5526         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5527         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5528         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5529         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5530         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5531         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5532         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5533         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5534         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5535         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5536         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5537         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5538         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5539         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5540         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5541         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5542         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5543         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5544         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5545         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5546         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5547         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5548         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5549         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5550         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5551         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5552         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5553         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5554         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5555         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5556         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5557         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5558         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5559         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5560         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5561         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5562         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5563         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5564         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5565         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5566         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5567         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5568         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5569         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5570         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5571         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5572         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5573         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5574         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5575         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5576         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5577         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5578         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5579         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5580         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5581         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5582         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5583         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5584         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5585         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5586         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5587         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5588         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5589         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5590         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5591         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5592         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5593         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5594         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5595         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5596         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5597         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5598         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5599         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5600         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5601         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5602         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5603         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5604         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5605         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5606         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5607         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5608         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5609         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5610         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5611         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5612         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5613         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5614         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5615         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5616         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5617         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5618         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5619         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5620         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5621         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5622         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5623         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5624         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5625         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5626         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5627         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5628         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5629         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5630         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5631         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5632         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5633         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5634         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5635         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5636         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5637         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5638         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5639         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5640         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5641         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5642         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5643         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5644         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5645         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5646         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5647         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5648         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5649         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5650         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5651         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5652         0x3c020800, 0x8c421bc0, 0x3303f