[TG3]: 5722/5756 don't need PHY jitter workaround.
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.72"
68 #define DRV_MODULE_RELDATE      "January 8, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself;
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
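/* For instance, with the power-of-two TG3_TX_RING_SIZE below, an
 * unsigned "(idx + 1) % TG3_TX_RING_SIZE" is equivalent to
 * "(idx + 1) & (TG3_TX_RING_SIZE - 1)", which is the form the
 * NEXT_TX() macro spells out explicitly.
 */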
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
202         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
203         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
204         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
205         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
206         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
207         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
208         {}
209 };
210
211 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
212
213 static const struct {
214         const char string[ETH_GSTRING_LEN];
215 } ethtool_stats_keys[TG3_NUM_STATS] = {
216         { "rx_octets" },
217         { "rx_fragments" },
218         { "rx_ucast_packets" },
219         { "rx_mcast_packets" },
220         { "rx_bcast_packets" },
221         { "rx_fcs_errors" },
222         { "rx_align_errors" },
223         { "rx_xon_pause_rcvd" },
224         { "rx_xoff_pause_rcvd" },
225         { "rx_mac_ctrl_rcvd" },
226         { "rx_xoff_entered" },
227         { "rx_frame_too_long_errors" },
228         { "rx_jabbers" },
229         { "rx_undersize_packets" },
230         { "rx_in_length_errors" },
231         { "rx_out_length_errors" },
232         { "rx_64_or_less_octet_packets" },
233         { "rx_65_to_127_octet_packets" },
234         { "rx_128_to_255_octet_packets" },
235         { "rx_256_to_511_octet_packets" },
236         { "rx_512_to_1023_octet_packets" },
237         { "rx_1024_to_1522_octet_packets" },
238         { "rx_1523_to_2047_octet_packets" },
239         { "rx_2048_to_4095_octet_packets" },
240         { "rx_4096_to_8191_octet_packets" },
241         { "rx_8192_to_9022_octet_packets" },
242
243         { "tx_octets" },
244         { "tx_collisions" },
245
246         { "tx_xon_sent" },
247         { "tx_xoff_sent" },
248         { "tx_flow_control" },
249         { "tx_mac_errors" },
250         { "tx_single_collisions" },
251         { "tx_mult_collisions" },
252         { "tx_deferred" },
253         { "tx_excessive_collisions" },
254         { "tx_late_collisions" },
255         { "tx_collide_2times" },
256         { "tx_collide_3times" },
257         { "tx_collide_4times" },
258         { "tx_collide_5times" },
259         { "tx_collide_6times" },
260         { "tx_collide_7times" },
261         { "tx_collide_8times" },
262         { "tx_collide_9times" },
263         { "tx_collide_10times" },
264         { "tx_collide_11times" },
265         { "tx_collide_12times" },
266         { "tx_collide_13times" },
267         { "tx_collide_14times" },
268         { "tx_collide_15times" },
269         { "tx_ucast_packets" },
270         { "tx_mcast_packets" },
271         { "tx_bcast_packets" },
272         { "tx_carrier_sense_errors" },
273         { "tx_discards" },
274         { "tx_errors" },
275
276         { "dma_writeq_full" },
277         { "dma_write_prioq_full" },
278         { "rxbds_empty" },
279         { "rx_discards" },
280         { "rx_errors" },
281         { "rx_threshold_hit" },
282
283         { "dma_readq_full" },
284         { "dma_read_prioq_full" },
285         { "tx_comp_queue_full" },
286
287         { "ring_set_send_prod_index" },
288         { "ring_status_update" },
289         { "nic_irqs" },
290         { "nic_avoided_irqs" },
291         { "nic_tx_threshold_hit" }
292 };
293
294 static const struct {
295         const char string[ETH_GSTRING_LEN];
296 } ethtool_test_keys[TG3_NUM_TEST] = {
297         { "nvram test     (online) " },
298         { "link test      (online) " },
299         { "register test  (offline)" },
300         { "memory test    (offline)" },
301         { "loopback test  (offline)" },
302         { "interrupt test (offline)" },
303 };
304
305 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
306 {
307         writel(val, tp->regs + off);
308 }
309
310 static u32 tg3_read32(struct tg3 *tp, u32 off)
311 {
312         return (readl(tp->regs + off));
313 }
314
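/* Indirect register access: the target offset is loaded into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is
 * then moved through TG3PCI_REG_DATA.  indirect_lock serializes the
 * two-step sequence so concurrent users cannot clobber the window.
 */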
315 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
316 {
317         unsigned long flags;
318
319         spin_lock_irqsave(&tp->indirect_lock, flags);
320         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
321         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
322         spin_unlock_irqrestore(&tp->indirect_lock, flags);
323 }
324
325 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
326 {
327         writel(val, tp->regs + off);
328         readl(tp->regs + off);
329 }
330
331 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
332 {
333         unsigned long flags;
334         u32 val;
335
336         spin_lock_irqsave(&tp->indirect_lock, flags);
337         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
338         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
339         spin_unlock_irqrestore(&tp->indirect_lock, flags);
340         return val;
341 }
342
343 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
344 {
345         unsigned long flags;
346
347         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
348                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
349                                        TG3_64BIT_REG_LOW, val);
350                 return;
351         }
352         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
353                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
354                                        TG3_64BIT_REG_LOW, val);
355                 return;
356         }
357
358         spin_lock_irqsave(&tp->indirect_lock, flags);
359         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361         spin_unlock_irqrestore(&tp->indirect_lock, flags);
362
363         /* In indirect mode when disabling interrupts, we also need
364          * to clear the interrupt bit in the GRC local ctrl register.
365          */
366         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
367             (val == 0x1)) {
368                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
369                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
370         }
371 }
372
373 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
374 {
375         unsigned long flags;
376         u32 val;
377
378         spin_lock_irqsave(&tp->indirect_lock, flags);
379         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
380         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
381         spin_unlock_irqrestore(&tp->indirect_lock, flags);
382         return val;
383 }
384
385 /* usec_wait specifies the wait time in usec when writing to certain registers
386  * where it is unsafe to read back the register without some delay.
387  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
388  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
389  */
390 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
391 {
392         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
393             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
394                 /* Non-posted methods */
395                 tp->write32(tp, off, val);
396         else {
397                 /* Posted method */
398                 tg3_write32(tp, off, val);
399                 if (usec_wait)
400                         udelay(usec_wait);
401                 tp->read32(tp, off);
402         }
403         /* Wait again after the read for the posted method to guarantee that
404          * the wait time is met.
405          */
406         if (usec_wait)
407                 udelay(usec_wait);
408 }
409
410 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
411 {
412         tp->write32_mbox(tp, off, val);
413         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
414             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
415                 tp->read32_mbox(tp, off);
416 }
417
418 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
419 {
420         void __iomem *mbox = tp->regs + off;
421         writel(val, mbox);
422         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
423                 writel(val, mbox);
424         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
425                 readl(mbox);
426 }
427
428 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
429 {
430         return (readl(tp->regs + off + GRCMBOX_BASE));
431 }
432
433 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
434 {
435         writel(val, tp->regs + off + GRCMBOX_BASE);
436 }
437
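/* Shorthand accessors used throughout the driver: the plain forms go
 * through the per-chip read/write hooks, while the _f ("flush")
 * variants read the value back where needed so the write is posted
 * to the chip, and tw32_wait_f() additionally delays as described
 * above.
 */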
438 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
439 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
440 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
441 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
442 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
443
444 #define tw32(reg,val)           tp->write32(tp, reg, val)
445 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
446 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
447 #define tr32(reg)               tp->read32(tp, reg)
448
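/* NIC SRAM accessors: the offset is loaded into the
 * TG3PCI_MEM_WIN_BASE_ADDR window and the data moves through
 * TG3PCI_MEM_WIN_DATA, either via PCI config space or via MMIO
 * depending on TG3_FLAG_SRAM_USE_CONFIG.  The window base is always
 * restored to zero afterwards.
 */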
449 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
450 {
451         unsigned long flags;
452
453         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
454             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
455                 return;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
459                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
460                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
461
462                 /* Always leave this as zero. */
463                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
464         } else {
465                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
466                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
467
468                 /* Always leave this as zero. */
469                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
470         }
471         spin_unlock_irqrestore(&tp->indirect_lock, flags);
472 }
473
474 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
475 {
476         unsigned long flags;
477
478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
479             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
480                 *val = 0;
481                 return;
482         }
483
484         spin_lock_irqsave(&tp->indirect_lock, flags);
485         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
486                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
487                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
488
489                 /* Always leave this as zero. */
490                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
491         } else {
492                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
493                 *val = tr32(TG3PCI_MEM_WIN_DATA);
494
495                 /* Always leave this as zero. */
496                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
497         }
498         spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500
501 static void tg3_disable_ints(struct tg3 *tp)
502 {
503         tw32(TG3PCI_MISC_HOST_CTRL,
504              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
505         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
506 }
507
508 static inline void tg3_cond_int(struct tg3 *tp)
509 {
510         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
511             (tp->hw_status->status & SD_STATUS_UPDATED))
512                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
513         else
514                 tw32(HOSTCC_MODE, tp->coalesce_mode |
515                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
516 }
517
518 static void tg3_enable_ints(struct tg3 *tp)
519 {
520         tp->irq_sync = 0;
521         wmb();
522
523         tw32(TG3PCI_MISC_HOST_CTRL,
524              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
526                        (tp->last_tag << 24));
527         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
528                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
529                                (tp->last_tag << 24));
530         tg3_cond_int(tp);
531 }
532
533 static inline unsigned int tg3_has_work(struct tg3 *tp)
534 {
535         struct tg3_hw_status *sblk = tp->hw_status;
536         unsigned int work_exists = 0;
537
538         /* check for phy events */
539         if (!(tp->tg3_flags &
540               (TG3_FLAG_USE_LINKCHG_REG |
541                TG3_FLAG_POLL_SERDES))) {
542                 if (sblk->status & SD_STATUS_LINK_CHG)
543                         work_exists = 1;
544         }
545         /* check for RX/TX work to do */
546         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
547             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
548                 work_exists = 1;
549
550         return work_exists;
551 }
552
553 /* tg3_restart_ints
554  *  similar to tg3_enable_ints, but it accurately determines whether there
555  *  is new work pending and can return without flushing the PIO write
556  *  which re-enables interrupts.
557  */
558 static void tg3_restart_ints(struct tg3 *tp)
559 {
560         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
561                      tp->last_tag << 24);
562         mmiowb();
563
564         /* When doing tagged status, this work check is unnecessary.
565          * The last_tag we write above tells the chip which piece of
566          * work we've completed.
567          */
568         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
569             tg3_has_work(tp))
570                 tw32(HOSTCC_MODE, tp->coalesce_mode |
571                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
572 }
573
574 static inline void tg3_netif_stop(struct tg3 *tp)
575 {
576         tp->dev->trans_start = jiffies; /* prevent tx timeout */
577         netif_poll_disable(tp->dev);
578         netif_tx_disable(tp->dev);
579 }
580
581 static inline void tg3_netif_start(struct tg3 *tp)
582 {
583         netif_wake_queue(tp->dev);
584         /* NOTE: unconditional netif_wake_queue is only appropriate
585          * so long as all callers are assured to have free tx slots
586          * (such as after tg3_init_hw)
587          */
588         netif_poll_enable(tp->dev);
589         tp->hw_status->status |= SD_STATUS_UPDATED;
590         tg3_enable_ints(tp);
591 }
592
593 static void tg3_switch_clocks(struct tg3 *tp)
594 {
595         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
596         u32 orig_clock_ctrl;
597
598         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
599                 return;
600
601         orig_clock_ctrl = clock_ctrl;
602         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
603                        CLOCK_CTRL_CLKRUN_OENABLE |
604                        0x1f);
605         tp->pci_clock_ctrl = clock_ctrl;
606
607         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
608                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
609                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
610                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
611                 }
612         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
613                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
614                             clock_ctrl |
615                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
616                             40);
617                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
618                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
619                             40);
620         }
621         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
622 }
623
624 #define PHY_BUSY_LOOPS  5000
625
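/* MII management access goes through the MAC_MI_COM register: a
 * frame carrying the PHY address, register address and command is
 * written, then MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS
 * iterations) until the transaction completes.  If autopolling is
 * enabled it is paused for the duration of the access and restored
 * afterwards.
 */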
626 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
627 {
628         u32 frame_val;
629         unsigned int loops;
630         int ret;
631
632         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
633                 tw32_f(MAC_MI_MODE,
634                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
635                 udelay(80);
636         }
637
638         *val = 0x0;
639
640         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
641                       MI_COM_PHY_ADDR_MASK);
642         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
643                       MI_COM_REG_ADDR_MASK);
644         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
645
646         tw32_f(MAC_MI_COM, frame_val);
647
648         loops = PHY_BUSY_LOOPS;
649         while (loops != 0) {
650                 udelay(10);
651                 frame_val = tr32(MAC_MI_COM);
652
653                 if ((frame_val & MI_COM_BUSY) == 0) {
654                         udelay(5);
655                         frame_val = tr32(MAC_MI_COM);
656                         break;
657                 }
658                 loops -= 1;
659         }
660
661         ret = -EBUSY;
662         if (loops != 0) {
663                 *val = frame_val & MI_COM_DATA_MASK;
664                 ret = 0;
665         }
666
667         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
668                 tw32_f(MAC_MI_MODE, tp->mi_mode);
669                 udelay(80);
670         }
671
672         return ret;
673 }
674
675 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
676 {
677         u32 frame_val;
678         unsigned int loops;
679         int ret;
680
681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
682             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
683                 return 0;
684
685         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
686                 tw32_f(MAC_MI_MODE,
687                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
688                 udelay(80);
689         }
690
691         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
692                       MI_COM_PHY_ADDR_MASK);
693         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
694                       MI_COM_REG_ADDR_MASK);
695         frame_val |= (val & MI_COM_DATA_MASK);
696         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
697
698         tw32_f(MAC_MI_COM, frame_val);
699
700         loops = PHY_BUSY_LOOPS;
701         while (loops != 0) {
702                 udelay(10);
703                 frame_val = tr32(MAC_MI_COM);
704                 if ((frame_val & MI_COM_BUSY) == 0) {
705                         udelay(5);
706                         frame_val = tr32(MAC_MI_COM);
707                         break;
708                 }
709                 loops -= 1;
710         }
711
712         ret = -EBUSY;
713         if (loops != 0)
714                 ret = 0;
715
716         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717                 tw32_f(MAC_MI_MODE, tp->mi_mode);
718                 udelay(80);
719         }
720
721         return ret;
722 }
723
724 static void tg3_phy_set_wirespeed(struct tg3 *tp)
725 {
726         u32 val;
727
728         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
729                 return;
730
731         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
732             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
733                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
734                              (val | (1 << 15) | (1 << 4)));
735 }
736
737 static int tg3_bmcr_reset(struct tg3 *tp)
738 {
739         u32 phy_control;
740         int limit, err;
741
742         /* OK, reset it, and poll the BMCR_RESET bit until it
743          * clears or we time out.
744          */
745         phy_control = BMCR_RESET;
746         err = tg3_writephy(tp, MII_BMCR, phy_control);
747         if (err != 0)
748                 return -EBUSY;
749
750         limit = 5000;
751         while (limit--) {
752                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
753                 if (err != 0)
754                         return -EBUSY;
755
756                 if ((phy_control & BMCR_RESET) == 0) {
757                         udelay(40);
758                         break;
759                 }
760                 udelay(10);
761         }
762         if (limit < 0)
763                 return -EBUSY;
764
765         return 0;
766 }
767
768 static int tg3_wait_macro_done(struct tg3 *tp)
769 {
770         int limit = 100;
771
772         while (limit--) {
773                 u32 tmp32;
774
775                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
776                         if ((tmp32 & 0x1000) == 0)
777                                 break;
778                 }
779         }
780         if (limit < 0)
781                 return -EBUSY;
782
783         return 0;
784 }
785
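/* Write the DSP test patterns to each of the four channels, read
 * them back through MII_TG3_DSP_RW_PORT and compare against the
 * expected values.  A macro timeout requests another PHY reset via
 * *resetp; a data mismatch fails the check with -EBUSY.
 */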
786 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
787 {
788         static const u32 test_pat[4][6] = {
789         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
790         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
791         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
792         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
793         };
794         int chan;
795
796         for (chan = 0; chan < 4; chan++) {
797                 int i;
798
799                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
800                              (chan * 0x2000) | 0x0200);
801                 tg3_writephy(tp, 0x16, 0x0002);
802
803                 for (i = 0; i < 6; i++)
804                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
805                                      test_pat[chan][i]);
806
807                 tg3_writephy(tp, 0x16, 0x0202);
808                 if (tg3_wait_macro_done(tp)) {
809                         *resetp = 1;
810                         return -EBUSY;
811                 }
812
813                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
814                              (chan * 0x2000) | 0x0200);
815                 tg3_writephy(tp, 0x16, 0x0082);
816                 if (tg3_wait_macro_done(tp)) {
817                         *resetp = 1;
818                         return -EBUSY;
819                 }
820
821                 tg3_writephy(tp, 0x16, 0x0802);
822                 if (tg3_wait_macro_done(tp)) {
823                         *resetp = 1;
824                         return -EBUSY;
825                 }
826
827                 for (i = 0; i < 6; i += 2) {
828                         u32 low, high;
829
830                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
831                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
832                             tg3_wait_macro_done(tp)) {
833                                 *resetp = 1;
834                                 return -EBUSY;
835                         }
836                         low &= 0x7fff;
837                         high &= 0x000f;
838                         if (low != test_pat[chan][i] ||
839                             high != test_pat[chan][i+1]) {
840                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
841                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
842                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
843
844                                 return -EBUSY;
845                         }
846                 }
847         }
848
849         return 0;
850 }
851
852 static int tg3_phy_reset_chanpat(struct tg3 *tp)
853 {
854         int chan;
855
856         for (chan = 0; chan < 4; chan++) {
857                 int i;
858
859                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
860                              (chan * 0x2000) | 0x0200);
861                 tg3_writephy(tp, 0x16, 0x0002);
862                 for (i = 0; i < 6; i++)
863                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
864                 tg3_writephy(tp, 0x16, 0x0202);
865                 if (tg3_wait_macro_done(tp))
866                         return -EBUSY;
867         }
868
869         return 0;
870 }
871
872 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
873 {
874         u32 reg32, phy9_orig;
875         int retries, do_phy_reset, err;
876
877         retries = 10;
878         do_phy_reset = 1;
879         do {
880                 if (do_phy_reset) {
881                         err = tg3_bmcr_reset(tp);
882                         if (err)
883                                 return err;
884                         do_phy_reset = 0;
885                 }
886
887                 /* Disable transmitter and interrupt.  */
888                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
889                         continue;
890
891                 reg32 |= 0x3000;
892                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
893
894                 /* Set full-duplex, 1000 mbps.  */
895                 tg3_writephy(tp, MII_BMCR,
896                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
897
898                 /* Set to master mode.  */
899                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
900                         continue;
901
902                 tg3_writephy(tp, MII_TG3_CTRL,
903                              (MII_TG3_CTRL_AS_MASTER |
904                               MII_TG3_CTRL_ENABLE_AS_MASTER));
905
906                 /* Enable SM_DSP_CLOCK and 6dB.  */
907                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
908
909                 /* Block the PHY control access.  */
910                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
911                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
912
913                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
914                 if (!err)
915                         break;
916         } while (--retries);
917
918         err = tg3_phy_reset_chanpat(tp);
919         if (err)
920                 return err;
921
922         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
923         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
924
925         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
926         tg3_writephy(tp, 0x16, 0x0000);
927
928         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
930                 /* Set Extended packet length bit for jumbo frames */
931                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
932         }
933         else {
934                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
935         }
936
937         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
938
939         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
940                 reg32 &= ~0x3000;
941                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
942         } else if (!err)
943                 err = -EBUSY;
944
945         return err;
946 }
947
948 static void tg3_link_report(struct tg3 *);
949
950 /* This resets the tigon3 PHY unconditionally and then reapplies the
951  * chip-specific PHY workarounds.
952  */
953 static int tg3_phy_reset(struct tg3 *tp)
954 {
955         u32 phy_status;
956         int err;
957
958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
959                 u32 val;
960
961                 val = tr32(GRC_MISC_CFG);
962                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
963                 udelay(40);
964         }
965         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
966         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
967         if (err != 0)
968                 return -EBUSY;
969
970         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
971                 netif_carrier_off(tp->dev);
972                 tg3_link_report(tp);
973         }
974
975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
978                 err = tg3_phy_reset_5703_4_5(tp);
979                 if (err)
980                         return err;
981                 goto out;
982         }
983
984         err = tg3_bmcr_reset(tp);
985         if (err)
986                 return err;
987
988 out:
989         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
990                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
991                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
992                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
993                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
994                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
995                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
996         }
997         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
998                 tg3_writephy(tp, 0x1c, 0x8d68);
999                 tg3_writephy(tp, 0x1c, 0x8d68);
1000         }
1001         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1002                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1003                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1004                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1005                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1006                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1007                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1008                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1009                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1010         }
1011         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1012                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1013                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1014                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1015                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1016                         tg3_writephy(tp, MII_TG3_TEST1,
1017                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1018                 } else
1019                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1020                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1021         }
1022         /* Set Extended packet length bit (bit 14) on all chips that */
1023         /* support jumbo frames */
1024         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1025                 /* Cannot do read-modify-write on 5401 */
1026                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1027         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1028                 u32 phy_reg;
1029
1030                 /* Set bit 14 with read-modify-write to preserve other bits */
1031                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1032                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1033                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1034         }
1035
1036         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1037          * jumbo frames transmission.
1038          */
1039         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1040                 u32 phy_reg;
1041
1042                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1043                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1044                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1045         }
1046
1047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1048                 u32 phy_reg;
1049
1050                 /* adjust output voltage */
1051                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1052
1053                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
1054                         u32 phy_reg2;
1055
1056                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1057                                      phy_reg | MII_TG3_EPHY_SHADOW_EN);
1058                         /* Enable auto-MDIX */
1059                         if (!tg3_readphy(tp, 0x10, &phy_reg2))
1060                                 tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
1061                         tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
1062                 }
1063         }
1064
1065         tg3_phy_set_wirespeed(tp);
1066         return 0;
1067 }
1068
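/* Drive the GPIOs that control auxiliary power.  On dual-port
 * devices (5704/5714) the peer function's WOL/ASF state is taken
 * into account so that the two ports do not fight over the shared
 * GPIOs.
 */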
1069 static void tg3_frob_aux_power(struct tg3 *tp)
1070 {
1071         struct tg3 *tp_peer = tp;
1072
1073         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1074                 return;
1075
1076         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1077             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1078                 struct net_device *dev_peer;
1079
1080                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1081                 /* remove_one() may have been run on the peer. */
1082                 if (!dev_peer)
1083                         tp_peer = tp;
1084                 else
1085                         tp_peer = netdev_priv(dev_peer);
1086         }
1087
1088         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1089             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1090             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1091             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1094                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1095                                     (GRC_LCLCTRL_GPIO_OE0 |
1096                                      GRC_LCLCTRL_GPIO_OE1 |
1097                                      GRC_LCLCTRL_GPIO_OE2 |
1098                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1099                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1100                                     100);
1101                 } else {
1102                         u32 no_gpio2;
1103                         u32 grc_local_ctrl = 0;
1104
1105                         if (tp_peer != tp &&
1106                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1107                                 return;
1108
1109                         /* Workaround to prevent overdrawing Amps. */
1110                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1111                             ASIC_REV_5714) {
1112                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1113                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1114                                             grc_local_ctrl, 100);
1115                         }
1116
1117                         /* On 5753 and variants, GPIO2 cannot be used. */
1118                         no_gpio2 = tp->nic_sram_data_cfg &
1119                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1120
1121                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1122                                          GRC_LCLCTRL_GPIO_OE1 |
1123                                          GRC_LCLCTRL_GPIO_OE2 |
1124                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1125                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1126                         if (no_gpio2) {
1127                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1128                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1129                         }
1130                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1131                                                     grc_local_ctrl, 100);
1132
1133                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1134
1135                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1136                                                     grc_local_ctrl, 100);
1137
1138                         if (!no_gpio2) {
1139                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1140                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1141                                             grc_local_ctrl, 100);
1142                         }
1143                 }
1144         } else {
1145                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1146                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1147                         if (tp_peer != tp &&
1148                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1149                                 return;
1150
1151                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1152                                     (GRC_LCLCTRL_GPIO_OE1 |
1153                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1154
1155                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1156                                     GRC_LCLCTRL_GPIO_OE1, 100);
1157
1158                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1159                                     (GRC_LCLCTRL_GPIO_OE1 |
1160                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1161                 }
1162         }
1163 }
1164
1165 static int tg3_setup_phy(struct tg3 *, int);
1166
1167 #define RESET_KIND_SHUTDOWN     0
1168 #define RESET_KIND_INIT         1
1169 #define RESET_KIND_SUSPEND      2
1170
1171 static void tg3_write_sig_post_reset(struct tg3 *, int);
1172 static int tg3_halt_cpu(struct tg3 *, u32);
1173 static int tg3_nvram_lock(struct tg3 *);
1174 static void tg3_nvram_unlock(struct tg3 *);
1175
1176 static void tg3_power_down_phy(struct tg3 *tp)
1177 {
1178         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1179                 return;
1180
1181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1182                 u32 val;
1183
1184                 tg3_bmcr_reset(tp);
1185                 val = tr32(GRC_MISC_CFG);
1186                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1187                 udelay(40);
1188                 return;
1189         } else {
1190                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1191                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1192                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1193         }
1194
1195         /* The PHY should not be powered down on some chips because
1196          * of bugs.
1197          */
1198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1200             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1201              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1202                 return;
1203         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1204 }
1205
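/* Transition the device to the requested PCI power state, setting up
 * WOL, MAC/clock and PHY power as appropriate for the target state
 * before the PM control register is finally written.
 */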
1206 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1207 {
1208         u32 misc_host_ctrl;
1209         u16 power_control, power_caps;
1210         int pm = tp->pm_cap;
1211
1212         /* Make sure register accesses (indirect or otherwise)
1213          * will function correctly.
1214          */
1215         pci_write_config_dword(tp->pdev,
1216                                TG3PCI_MISC_HOST_CTRL,
1217                                tp->misc_host_ctrl);
1218
1219         pci_read_config_word(tp->pdev,
1220                              pm + PCI_PM_CTRL,
1221                              &power_control);
1222         power_control |= PCI_PM_CTRL_PME_STATUS;
1223         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1224         switch (state) {
1225         case PCI_D0:
1226                 power_control |= 0;
1227                 pci_write_config_word(tp->pdev,
1228                                       pm + PCI_PM_CTRL,
1229                                       power_control);
1230                 udelay(100);    /* Delay after power state change */
1231
1232                 /* Switch out of Vaux if it is a NIC */
1233                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1234                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1235
1236                 return 0;
1237
1238         case PCI_D1:
1239                 power_control |= 1;
1240                 break;
1241
1242         case PCI_D2:
1243                 power_control |= 2;
1244                 break;
1245
1246         case PCI_D3hot:
1247                 power_control |= 3;
1248                 break;
1249
1250         default:
1251                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1252                        "requested.\n",
1253                        tp->dev->name, state);
1254                 return -EINVAL;
1255         };
1256
1257         power_control |= PCI_PM_CTRL_PME_ENABLE;
1258
1259         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1260         tw32(TG3PCI_MISC_HOST_CTRL,
1261              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1262
1263         if (tp->link_config.phy_is_low_power == 0) {
1264                 tp->link_config.phy_is_low_power = 1;
1265                 tp->link_config.orig_speed = tp->link_config.speed;
1266                 tp->link_config.orig_duplex = tp->link_config.duplex;
1267                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1268         }
1269
1270         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1271                 tp->link_config.speed = SPEED_10;
1272                 tp->link_config.duplex = DUPLEX_HALF;
1273                 tp->link_config.autoneg = AUTONEG_ENABLE;
1274                 tg3_setup_phy(tp, 0);
1275         }
1276
1277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1278                 u32 val;
1279
1280                 val = tr32(GRC_VCPU_EXT_CTRL);
1281                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1282         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1283                 int i;
1284                 u32 val;
1285
1286                 for (i = 0; i < 200; i++) {
1287                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1288                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1289                                 break;
1290                         msleep(1);
1291                 }
1292         }
1293         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1294                                              WOL_DRV_STATE_SHUTDOWN |
1295                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1296
1297         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1298
1299         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1300                 u32 mac_mode;
1301
1302                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1303                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1304                         udelay(40);
1305
1306                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1307                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1308                         else
1309                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1310
1311                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1312                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1313                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1314                 } else {
1315                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1316                 }
1317
1318                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1319                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1320
1321                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1322                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1323                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1324
1325                 tw32_f(MAC_MODE, mac_mode);
1326                 udelay(100);
1327
1328                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1329                 udelay(10);
1330         }
1331
1332         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1333             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1334              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1335                 u32 base_val;
1336
1337                 base_val = tp->pci_clock_ctrl;
1338                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1339                              CLOCK_CTRL_TXCLK_DISABLE);
1340
1341                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1342                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1343         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1344                 /* do nothing */
1345         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1346                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1347                 u32 newbits1, newbits2;
1348
1349                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1350                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1351                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1352                                     CLOCK_CTRL_TXCLK_DISABLE |
1353                                     CLOCK_CTRL_ALTCLK);
1354                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1355                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1356                         newbits1 = CLOCK_CTRL_625_CORE;
1357                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1358                 } else {
1359                         newbits1 = CLOCK_CTRL_ALTCLK;
1360                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1361                 }
1362
1363                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1364                             40);
1365
1366                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1367                             40);
1368
1369                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1370                         u32 newbits3;
1371
1372                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1373                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1374                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1375                                             CLOCK_CTRL_TXCLK_DISABLE |
1376                                             CLOCK_CTRL_44MHZ_CORE);
1377                         } else {
1378                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1379                         }
1380
1381                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1382                                     tp->pci_clock_ctrl | newbits3, 40);
1383                 }
1384         }
1385
1386         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1387             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1388                 tg3_power_down_phy(tp);
1389
1390         tg3_frob_aux_power(tp);
1391
1392         /* Workaround for unstable PLL clock */
1393         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1394             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1395                 u32 val = tr32(0x7d00);
1396
1397                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1398                 tw32(0x7d00, val);
1399                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1400                         int err;
1401
1402                         err = tg3_nvram_lock(tp);
1403                         tg3_halt_cpu(tp, RX_CPU_BASE);
1404                         if (!err)
1405                                 tg3_nvram_unlock(tp);
1406                 }
1407         }
1408
1409         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1410
1411         /* Finally, set the new power state. */
1412         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1413         udelay(100);    /* Delay after power state change */
1414
1415         return 0;
1416 }
1417
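/* Log the current link state (up/down, speed, duplex and flow
 * control) through printk when link messages are enabled.
 */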
1418 static void tg3_link_report(struct tg3 *tp)
1419 {
1420         if (!netif_carrier_ok(tp->dev)) {
1421                 if (netif_msg_link(tp))
1422                         printk(KERN_INFO PFX "%s: Link is down.\n",
1423                                tp->dev->name);
1424         } else if (netif_msg_link(tp)) {
1425                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1426                        tp->dev->name,
1427                        (tp->link_config.active_speed == SPEED_1000 ?
1428                         1000 :
1429                         (tp->link_config.active_speed == SPEED_100 ?
1430                          100 : 10)),
1431                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1432                         "full" : "half"));
1433
1434                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1435                        "%s for RX.\n",
1436                        tp->dev->name,
1437                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1438                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1439         }
1440 }
1441
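/* Resolve TX/RX pause from the local and remote pause advertisements
 * (the usual 802.3 pause resolution) and program the MAC_RX_MODE /
 * MAC_TX_MODE flow control enables to match.
 */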
1442 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1443 {
1444         u32 new_tg3_flags = 0;
1445         u32 old_rx_mode = tp->rx_mode;
1446         u32 old_tx_mode = tp->tx_mode;
1447
1448         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1449
1450                 /* Convert 1000BaseX flow control bits to 1000BaseT
1451                  * bits before resolving flow control.
1452                  */
1453                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1454                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1455                                        ADVERTISE_PAUSE_ASYM);
1456                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1457
1458                         if (local_adv & ADVERTISE_1000XPAUSE)
1459                                 local_adv |= ADVERTISE_PAUSE_CAP;
1460                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1461                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1462                         if (remote_adv & LPA_1000XPAUSE)
1463                                 remote_adv |= LPA_PAUSE_CAP;
1464                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1465                                 remote_adv |= LPA_PAUSE_ASYM;
1466                 }
1467
1468                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1469                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1470                                 if (remote_adv & LPA_PAUSE_CAP)
1471                                         new_tg3_flags |=
1472                                                 (TG3_FLAG_RX_PAUSE |
1473                                                 TG3_FLAG_TX_PAUSE);
1474                                 else if (remote_adv & LPA_PAUSE_ASYM)
1475                                         new_tg3_flags |=
1476                                                 (TG3_FLAG_RX_PAUSE);
1477                         } else {
1478                                 if (remote_adv & LPA_PAUSE_CAP)
1479                                         new_tg3_flags |=
1480                                                 (TG3_FLAG_RX_PAUSE |
1481                                                 TG3_FLAG_TX_PAUSE);
1482                         }
1483                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1484                         if ((remote_adv & LPA_PAUSE_CAP) &&
1485                             (remote_adv & LPA_PAUSE_ASYM))
1486                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1487                 }
1488
1489                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1490                 tp->tg3_flags |= new_tg3_flags;
1491         } else {
1492                 new_tg3_flags = tp->tg3_flags;
1493         }
1494
1495         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1496                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1497         else
1498                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1499
1500         if (old_rx_mode != tp->rx_mode) {
1501                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1502         }
1503
1504         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1505                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1506         else
1507                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1508
1509         if (old_tx_mode != tp->tx_mode) {
1510                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1511         }
1512 }
1513
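/* Decode the speed/duplex field of the PHY auxiliary status register
 * into SPEED_* / DUPLEX_* values; unknown encodings map to
 * SPEED_INVALID / DUPLEX_INVALID.
 */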
1514 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1515 {
1516         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1517         case MII_TG3_AUX_STAT_10HALF:
1518                 *speed = SPEED_10;
1519                 *duplex = DUPLEX_HALF;
1520                 break;
1521
1522         case MII_TG3_AUX_STAT_10FULL:
1523                 *speed = SPEED_10;
1524                 *duplex = DUPLEX_FULL;
1525                 break;
1526
1527         case MII_TG3_AUX_STAT_100HALF:
1528                 *speed = SPEED_100;
1529                 *duplex = DUPLEX_HALF;
1530                 break;
1531
1532         case MII_TG3_AUX_STAT_100FULL:
1533                 *speed = SPEED_100;
1534                 *duplex = DUPLEX_FULL;
1535                 break;
1536
1537         case MII_TG3_AUX_STAT_1000HALF:
1538                 *speed = SPEED_1000;
1539                 *duplex = DUPLEX_HALF;
1540                 break;
1541
1542         case MII_TG3_AUX_STAT_1000FULL:
1543                 *speed = SPEED_1000;
1544                 *duplex = DUPLEX_FULL;
1545                 break;
1546
1547         default:
1548                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1549                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1550                                  SPEED_10;
1551                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1552                                   DUPLEX_HALF;
1553                         break;
1554                 }
1555                 *speed = SPEED_INVALID;
1556                 *duplex = DUPLEX_INVALID;
1557                 break;
1558         }
1559 }
1560
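/* Program the copper PHY advertisement registers from tp->link_config
 * and either restart autonegotiation or force the requested
 * speed/duplex (briefly dropping into loopback while reprogramming
 * BMCR on forced links).
 */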
1561 static void tg3_phy_copper_begin(struct tg3 *tp)
1562 {
1563         u32 new_adv;
1564         int i;
1565
1566         if (tp->link_config.phy_is_low_power) {
1567                 /* Entering low power mode.  Disable gigabit and
1568                  * 100baseT advertisements.
1569                  */
1570                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1571
1572                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1573                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1574                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1575                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1576
1577                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1578         } else if (tp->link_config.speed == SPEED_INVALID) {
1579                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1580                         tp->link_config.advertising &=
1581                                 ~(ADVERTISED_1000baseT_Half |
1582                                   ADVERTISED_1000baseT_Full);
1583
1584                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1585                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1586                         new_adv |= ADVERTISE_10HALF;
1587                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1588                         new_adv |= ADVERTISE_10FULL;
1589                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1590                         new_adv |= ADVERTISE_100HALF;
1591                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1592                         new_adv |= ADVERTISE_100FULL;
1593                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1594
1595                 if (tp->link_config.advertising &
1596                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1597                         new_adv = 0;
1598                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1599                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1600                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1601                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1602                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1603                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1604                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1605                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1606                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1607                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1608                 } else {
1609                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1610                 }
1611         } else {
1612                 /* Asking for a specific link mode. */
1613                 if (tp->link_config.speed == SPEED_1000) {
1614                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1615                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1616
1617                         if (tp->link_config.duplex == DUPLEX_FULL)
1618                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1619                         else
1620                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1621                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1622                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1623                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1624                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1625                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1626                 } else {
1627                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1628
1629                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1630                         if (tp->link_config.speed == SPEED_100) {
1631                                 if (tp->link_config.duplex == DUPLEX_FULL)
1632                                         new_adv |= ADVERTISE_100FULL;
1633                                 else
1634                                         new_adv |= ADVERTISE_100HALF;
1635                         } else {
1636                                 if (tp->link_config.duplex == DUPLEX_FULL)
1637                                         new_adv |= ADVERTISE_10FULL;
1638                                 else
1639                                         new_adv |= ADVERTISE_10HALF;
1640                         }
1641                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1642                 }
1643         }
1644
1645         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1646             tp->link_config.speed != SPEED_INVALID) {
1647                 u32 bmcr, orig_bmcr;
1648
1649                 tp->link_config.active_speed = tp->link_config.speed;
1650                 tp->link_config.active_duplex = tp->link_config.duplex;
1651
1652                 bmcr = 0;
1653                 switch (tp->link_config.speed) {
1654                 default:
1655                 case SPEED_10:
1656                         break;
1657
1658                 case SPEED_100:
1659                         bmcr |= BMCR_SPEED100;
1660                         break;
1661
1662                 case SPEED_1000:
1663                         bmcr |= TG3_BMCR_SPEED1000;
1664                         break;
1665                 }
1666
1667                 if (tp->link_config.duplex == DUPLEX_FULL)
1668                         bmcr |= BMCR_FULLDPLX;
1669
1670                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1671                     (bmcr != orig_bmcr)) {
1672                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1673                         for (i = 0; i < 1500; i++) {
1674                                 u32 tmp;
1675
1676                                 udelay(10);
1677                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1678                                     tg3_readphy(tp, MII_BMSR, &tmp))
1679                                         continue;
1680                                 if (!(tmp & BMSR_LSTATUS)) {
1681                                         udelay(40);
1682                                         break;
1683                                 }
1684                         }
1685                         tg3_writephy(tp, MII_BMCR, bmcr);
1686                         udelay(40);
1687                 }
1688         } else {
1689                 tg3_writephy(tp, MII_BMCR,
1690                              BMCR_ANENABLE | BMCR_ANRESTART);
1691         }
1692 }
1693
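/* DSP setup for the BCM5401 PHY.  The address/value pairs written
 * below appear to be opaque vendor tuning constants; returns nonzero
 * if any of the PHY writes failed.
 */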
1694 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1695 {
1696         int err;
1697
1698         /* Turn off tap power management and set the
1699          * extended packet length bit. */
1700         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1701
1702         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1703         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1704
1705         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1706         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1707
1708         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1709         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1710
1711         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1712         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1713
1714         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1715         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1716
1717         udelay(40);
1718
1719         return err;
1720 }
1721
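/* Return 1 if the PHY advertisement registers already cover every
 * mode requested in @mask, 0 otherwise (or if the registers cannot
 * be read).
 */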
1722 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1723 {
1724         u32 adv_reg, all_mask = 0;
1725
1726         if (mask & ADVERTISED_10baseT_Half)
1727                 all_mask |= ADVERTISE_10HALF;
1728         if (mask & ADVERTISED_10baseT_Full)
1729                 all_mask |= ADVERTISE_10FULL;
1730         if (mask & ADVERTISED_100baseT_Half)
1731                 all_mask |= ADVERTISE_100HALF;
1732         if (mask & ADVERTISED_100baseT_Full)
1733                 all_mask |= ADVERTISE_100FULL;
1734
1735         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1736                 return 0;
1737
1738         if ((adv_reg & all_mask) != all_mask)
1739                 return 0;
1740         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1741                 u32 tg3_ctrl;
1742
1743                 all_mask = 0;
1744                 if (mask & ADVERTISED_1000baseT_Half)
1745                         all_mask |= ADVERTISE_1000HALF;
1746                 if (mask & ADVERTISED_1000baseT_Full)
1747                         all_mask |= ADVERTISE_1000FULL;
1748
1749                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1750                         return 0;
1751
1752                 if ((tg3_ctrl & all_mask) != all_mask)
1753                         return 0;
1754         }
1755         return 1;
1756 }
1757
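/* Link setup for copper PHYs: apply chip-specific PHY workarounds,
 * wait for link, read back the negotiated speed/duplex, resolve flow
 * control and program MAC_MODE, reporting any carrier change.
 */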
1758 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1759 {
1760         int current_link_up;
1761         u32 bmsr, dummy;
1762         u16 current_speed;
1763         u8 current_duplex;
1764         int i, err;
1765
1766         tw32(MAC_EVENT, 0);
1767
1768         tw32_f(MAC_STATUS,
1769              (MAC_STATUS_SYNC_CHANGED |
1770               MAC_STATUS_CFG_CHANGED |
1771               MAC_STATUS_MI_COMPLETION |
1772               MAC_STATUS_LNKSTATE_CHANGED));
1773         udelay(40);
1774
1775         tp->mi_mode = MAC_MI_MODE_BASE;
1776         tw32_f(MAC_MI_MODE, tp->mi_mode);
1777         udelay(80);
1778
1779         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1780
1781         /* Some third-party PHYs need to be reset on link going
1782          * down.
1783          */
1784         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1785              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1786              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1787             netif_carrier_ok(tp->dev)) {
1788                 tg3_readphy(tp, MII_BMSR, &bmsr);
1789                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1790                     !(bmsr & BMSR_LSTATUS))
1791                         force_reset = 1;
1792         }
1793         if (force_reset)
1794                 tg3_phy_reset(tp);
1795
1796         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1797                 tg3_readphy(tp, MII_BMSR, &bmsr);
1798                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1799                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1800                         bmsr = 0;
1801
1802                 if (!(bmsr & BMSR_LSTATUS)) {
1803                         err = tg3_init_5401phy_dsp(tp);
1804                         if (err)
1805                                 return err;
1806
1807                         tg3_readphy(tp, MII_BMSR, &bmsr);
1808                         for (i = 0; i < 1000; i++) {
1809                                 udelay(10);
1810                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1811                                     (bmsr & BMSR_LSTATUS)) {
1812                                         udelay(40);
1813                                         break;
1814                                 }
1815                         }
1816
1817                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1818                             !(bmsr & BMSR_LSTATUS) &&
1819                             tp->link_config.active_speed == SPEED_1000) {
1820                                 err = tg3_phy_reset(tp);
1821                                 if (!err)
1822                                         err = tg3_init_5401phy_dsp(tp);
1823                                 if (err)
1824                                         return err;
1825                         }
1826                 }
1827         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1828                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1829                 /* 5701 {A0,B0} CRC bug workaround */
1830                 tg3_writephy(tp, 0x15, 0x0a75);
1831                 tg3_writephy(tp, 0x1c, 0x8c68);
1832                 tg3_writephy(tp, 0x1c, 0x8d68);
1833                 tg3_writephy(tp, 0x1c, 0x8c68);
1834         }
1835
1836         /* Clear pending interrupts... */
1837         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1838         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1839
1840         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1841                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1842         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1843                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1844
1845         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1847                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1848                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1849                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1850                 else
1851                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1852         }
1853
1854         current_link_up = 0;
1855         current_speed = SPEED_INVALID;
1856         current_duplex = DUPLEX_INVALID;
1857
1858         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1859                 u32 val;
1860
1861                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1862                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1863                 if (!(val & (1 << 10))) {
1864                         val |= (1 << 10);
1865                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1866                         goto relink;
1867                 }
1868         }
1869
1870         bmsr = 0;
1871         for (i = 0; i < 100; i++) {
1872                 tg3_readphy(tp, MII_BMSR, &bmsr);
1873                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1874                     (bmsr & BMSR_LSTATUS))
1875                         break;
1876                 udelay(40);
1877         }
1878
1879         if (bmsr & BMSR_LSTATUS) {
1880                 u32 aux_stat, bmcr;
1881
1882                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1883                 for (i = 0; i < 2000; i++) {
1884                         udelay(10);
1885                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1886                             aux_stat)
1887                                 break;
1888                 }
1889
1890                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1891                                              &current_speed,
1892                                              &current_duplex);
1893
1894                 bmcr = 0;
1895                 for (i = 0; i < 200; i++) {
1896                         tg3_readphy(tp, MII_BMCR, &bmcr);
1897                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1898                                 continue;
1899                         if (bmcr && bmcr != 0x7fff)
1900                                 break;
1901                         udelay(10);
1902                 }
1903
1904                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1905                         if (bmcr & BMCR_ANENABLE) {
1906                                 current_link_up = 1;
1907
1908                                 /* Force autoneg restart if we are exiting
1909                                  * low power mode.
1910                                  */
1911                                 if (!tg3_copper_is_advertising_all(tp,
1912                                                 tp->link_config.advertising))
1913                                         current_link_up = 0;
1914                         } else {
1915                                 current_link_up = 0;
1916                         }
1917                 } else {
1918                         if (!(bmcr & BMCR_ANENABLE) &&
1919                             tp->link_config.speed == current_speed &&
1920                             tp->link_config.duplex == current_duplex) {
1921                                 current_link_up = 1;
1922                         } else {
1923                                 current_link_up = 0;
1924                         }
1925                 }
1926
1927                 tp->link_config.active_speed = current_speed;
1928                 tp->link_config.active_duplex = current_duplex;
1929         }
1930
1931         if (current_link_up == 1 &&
1932             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1933             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1934                 u32 local_adv, remote_adv;
1935
1936                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1937                         local_adv = 0;
1938                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1939
1940                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1941                         remote_adv = 0;
1942
1943                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1944
1945                 /* If we are not advertising full pause capability,
1946                  * something is wrong.  Bring the link down and reconfigure.
1947                  */
1948                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1949                         current_link_up = 0;
1950                 } else {
1951                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1952                 }
1953         }
1954 relink:
1955         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1956                 u32 tmp;
1957
1958                 tg3_phy_copper_begin(tp);
1959
1960                 tg3_readphy(tp, MII_BMSR, &tmp);
1961                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1962                     (tmp & BMSR_LSTATUS))
1963                         current_link_up = 1;
1964         }
1965
1966         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1967         if (current_link_up == 1) {
1968                 if (tp->link_config.active_speed == SPEED_100 ||
1969                     tp->link_config.active_speed == SPEED_10)
1970                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1971                 else
1972                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1973         } else
1974                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1975
1976         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1977         if (tp->link_config.active_duplex == DUPLEX_HALF)
1978                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1979
1980         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1982                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1983                     (current_link_up == 1 &&
1984                      tp->link_config.active_speed == SPEED_10))
1985                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1986         } else {
1987                 if (current_link_up == 1)
1988                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1989         }
1990
1991         /* ??? Without this setting Netgear GA302T PHY does not
1992          * ??? send/receive packets...
1993          */
1994         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1995             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1996                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1997                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1998                 udelay(80);
1999         }
2000
2001         tw32_f(MAC_MODE, tp->mac_mode);
2002         udelay(40);
2003
2004         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2005                 /* Polled via timer. */
2006                 tw32_f(MAC_EVENT, 0);
2007         } else {
2008                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2009         }
2010         udelay(40);
2011
2012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2013             current_link_up == 1 &&
2014             tp->link_config.active_speed == SPEED_1000 &&
2015             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2016              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2017                 udelay(120);
2018                 tw32_f(MAC_STATUS,
2019                      (MAC_STATUS_SYNC_CHANGED |
2020                       MAC_STATUS_CFG_CHANGED));
2021                 udelay(40);
2022                 tg3_write_mem(tp,
2023                               NIC_SRAM_FIRMWARE_MBOX,
2024                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2025         }
2026
2027         if (current_link_up != netif_carrier_ok(tp->dev)) {
2028                 if (current_link_up)
2029                         netif_carrier_on(tp->dev);
2030                 else
2031                         netif_carrier_off(tp->dev);
2032                 tg3_link_report(tp);
2033         }
2034
2035         return 0;
2036 }
2037
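/* Software state for the 1000BASE-X (IEEE 802.3 Clause 37)
 * autonegotiation state machine used on fiber ports when the MAC's
 * hardware autoneg block is not used.
 */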
2038 struct tg3_fiber_aneginfo {
2039         int state;
2040 #define ANEG_STATE_UNKNOWN              0
2041 #define ANEG_STATE_AN_ENABLE            1
2042 #define ANEG_STATE_RESTART_INIT         2
2043 #define ANEG_STATE_RESTART              3
2044 #define ANEG_STATE_DISABLE_LINK_OK      4
2045 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2046 #define ANEG_STATE_ABILITY_DETECT       6
2047 #define ANEG_STATE_ACK_DETECT_INIT      7
2048 #define ANEG_STATE_ACK_DETECT           8
2049 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2050 #define ANEG_STATE_COMPLETE_ACK         10
2051 #define ANEG_STATE_IDLE_DETECT_INIT     11
2052 #define ANEG_STATE_IDLE_DETECT          12
2053 #define ANEG_STATE_LINK_OK              13
2054 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2055 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2056
2057         u32 flags;
2058 #define MR_AN_ENABLE            0x00000001
2059 #define MR_RESTART_AN           0x00000002
2060 #define MR_AN_COMPLETE          0x00000004
2061 #define MR_PAGE_RX              0x00000008
2062 #define MR_NP_LOADED            0x00000010
2063 #define MR_TOGGLE_TX            0x00000020
2064 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2065 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2066 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2067 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2068 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2069 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2070 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2071 #define MR_TOGGLE_RX            0x00002000
2072 #define MR_NP_RX                0x00004000
2073
2074 #define MR_LINK_OK              0x80000000
2075
2076         unsigned long link_time, cur_time;
2077
2078         u32 ability_match_cfg;
2079         int ability_match_count;
2080
2081         char ability_match, idle_match, ack_match;
2082
2083         u32 txconfig, rxconfig;
2084 #define ANEG_CFG_NP             0x00000080
2085 #define ANEG_CFG_ACK            0x00000040
2086 #define ANEG_CFG_RF2            0x00000020
2087 #define ANEG_CFG_RF1            0x00000010
2088 #define ANEG_CFG_PS2            0x00000001
2089 #define ANEG_CFG_PS1            0x00008000
2090 #define ANEG_CFG_HD             0x00004000
2091 #define ANEG_CFG_FD             0x00002000
2092 #define ANEG_CFG_INVAL          0x00001f06
2093
2094 };
2095 #define ANEG_OK         0
2096 #define ANEG_DONE       1
2097 #define ANEG_TIMER_ENAB 2
2098 #define ANEG_FAILED     -1
2099
2100 #define ANEG_STATE_SETTLE_TIME  10000
2101
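/* Advance the software fiber autoneg state machine by one tick.
 * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is still in
 * progress, ANEG_DONE on completion, or ANEG_FAILED on error.
 */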
2102 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2103                                    struct tg3_fiber_aneginfo *ap)
2104 {
2105         unsigned long delta;
2106         u32 rx_cfg_reg;
2107         int ret;
2108
2109         if (ap->state == ANEG_STATE_UNKNOWN) {
2110                 ap->rxconfig = 0;
2111                 ap->link_time = 0;
2112                 ap->cur_time = 0;
2113                 ap->ability_match_cfg = 0;
2114                 ap->ability_match_count = 0;
2115                 ap->ability_match = 0;
2116                 ap->idle_match = 0;
2117                 ap->ack_match = 0;
2118         }
2119         ap->cur_time++;
2120
2121         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2122                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2123
2124                 if (rx_cfg_reg != ap->ability_match_cfg) {
2125                         ap->ability_match_cfg = rx_cfg_reg;
2126                         ap->ability_match = 0;
2127                         ap->ability_match_count = 0;
2128                 } else {
2129                         if (++ap->ability_match_count > 1) {
2130                                 ap->ability_match = 1;
2131                                 ap->ability_match_cfg = rx_cfg_reg;
2132                         }
2133                 }
2134                 if (rx_cfg_reg & ANEG_CFG_ACK)
2135                         ap->ack_match = 1;
2136                 else
2137                         ap->ack_match = 0;
2138
2139                 ap->idle_match = 0;
2140         } else {
2141                 ap->idle_match = 1;
2142                 ap->ability_match_cfg = 0;
2143                 ap->ability_match_count = 0;
2144                 ap->ability_match = 0;
2145                 ap->ack_match = 0;
2146
2147                 rx_cfg_reg = 0;
2148         }
2149
2150         ap->rxconfig = rx_cfg_reg;
2151         ret = ANEG_OK;
2152
2153         switch (ap->state) {
2154         case ANEG_STATE_UNKNOWN:
2155                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2156                         ap->state = ANEG_STATE_AN_ENABLE;
2157
2158                 /* fallthru */
2159         case ANEG_STATE_AN_ENABLE:
2160                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2161                 if (ap->flags & MR_AN_ENABLE) {
2162                         ap->link_time = 0;
2163                         ap->cur_time = 0;
2164                         ap->ability_match_cfg = 0;
2165                         ap->ability_match_count = 0;
2166                         ap->ability_match = 0;
2167                         ap->idle_match = 0;
2168                         ap->ack_match = 0;
2169
2170                         ap->state = ANEG_STATE_RESTART_INIT;
2171                 } else {
2172                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2173                 }
2174                 break;
2175
2176         case ANEG_STATE_RESTART_INIT:
2177                 ap->link_time = ap->cur_time;
2178                 ap->flags &= ~(MR_NP_LOADED);
2179                 ap->txconfig = 0;
2180                 tw32(MAC_TX_AUTO_NEG, 0);
2181                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2182                 tw32_f(MAC_MODE, tp->mac_mode);
2183                 udelay(40);
2184
2185                 ret = ANEG_TIMER_ENAB;
2186                 ap->state = ANEG_STATE_RESTART;
2187
2188                 /* fallthru */
2189         case ANEG_STATE_RESTART:
2190                 delta = ap->cur_time - ap->link_time;
2191                 if (delta > ANEG_STATE_SETTLE_TIME) {
2192                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2193                 } else {
2194                         ret = ANEG_TIMER_ENAB;
2195                 }
2196                 break;
2197
2198         case ANEG_STATE_DISABLE_LINK_OK:
2199                 ret = ANEG_DONE;
2200                 break;
2201
2202         case ANEG_STATE_ABILITY_DETECT_INIT:
2203                 ap->flags &= ~(MR_TOGGLE_TX);
2204                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2205                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2206                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2207                 tw32_f(MAC_MODE, tp->mac_mode);
2208                 udelay(40);
2209
2210                 ap->state = ANEG_STATE_ABILITY_DETECT;
2211                 break;
2212
2213         case ANEG_STATE_ABILITY_DETECT:
2214                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2215                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2216                 }
2217                 break;
2218
2219         case ANEG_STATE_ACK_DETECT_INIT:
2220                 ap->txconfig |= ANEG_CFG_ACK;
2221                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2222                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2223                 tw32_f(MAC_MODE, tp->mac_mode);
2224                 udelay(40);
2225
2226                 ap->state = ANEG_STATE_ACK_DETECT;
2227
2228                 /* fallthru */
2229         case ANEG_STATE_ACK_DETECT:
2230                 if (ap->ack_match != 0) {
2231                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2232                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2233                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2234                         } else {
2235                                 ap->state = ANEG_STATE_AN_ENABLE;
2236                         }
2237                 } else if (ap->ability_match != 0 &&
2238                            ap->rxconfig == 0) {
2239                         ap->state = ANEG_STATE_AN_ENABLE;
2240                 }
2241                 break;
2242
2243         case ANEG_STATE_COMPLETE_ACK_INIT:
2244                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2245                         ret = ANEG_FAILED;
2246                         break;
2247                 }
2248                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2249                                MR_LP_ADV_HALF_DUPLEX |
2250                                MR_LP_ADV_SYM_PAUSE |
2251                                MR_LP_ADV_ASYM_PAUSE |
2252                                MR_LP_ADV_REMOTE_FAULT1 |
2253                                MR_LP_ADV_REMOTE_FAULT2 |
2254                                MR_LP_ADV_NEXT_PAGE |
2255                                MR_TOGGLE_RX |
2256                                MR_NP_RX);
2257                 if (ap->rxconfig & ANEG_CFG_FD)
2258                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2259                 if (ap->rxconfig & ANEG_CFG_HD)
2260                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2261                 if (ap->rxconfig & ANEG_CFG_PS1)
2262                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2263                 if (ap->rxconfig & ANEG_CFG_PS2)
2264                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2265                 if (ap->rxconfig & ANEG_CFG_RF1)
2266                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2267                 if (ap->rxconfig & ANEG_CFG_RF2)
2268                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2269                 if (ap->rxconfig & ANEG_CFG_NP)
2270                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2271
2272                 ap->link_time = ap->cur_time;
2273
2274                 ap->flags ^= (MR_TOGGLE_TX);
2275                 if (ap->rxconfig & 0x0008)
2276                         ap->flags |= MR_TOGGLE_RX;
2277                 if (ap->rxconfig & ANEG_CFG_NP)
2278                         ap->flags |= MR_NP_RX;
2279                 ap->flags |= MR_PAGE_RX;
2280
2281                 ap->state = ANEG_STATE_COMPLETE_ACK;
2282                 ret = ANEG_TIMER_ENAB;
2283                 break;
2284
2285         case ANEG_STATE_COMPLETE_ACK:
2286                 if (ap->ability_match != 0 &&
2287                     ap->rxconfig == 0) {
2288                         ap->state = ANEG_STATE_AN_ENABLE;
2289                         break;
2290                 }
2291                 delta = ap->cur_time - ap->link_time;
2292                 if (delta > ANEG_STATE_SETTLE_TIME) {
2293                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2294                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2295                         } else {
2296                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2297                                     !(ap->flags & MR_NP_RX)) {
2298                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2299                                 } else {
2300                                         ret = ANEG_FAILED;
2301                                 }
2302                         }
2303                 }
2304                 break;
2305
2306         case ANEG_STATE_IDLE_DETECT_INIT:
2307                 ap->link_time = ap->cur_time;
2308                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2309                 tw32_f(MAC_MODE, tp->mac_mode);
2310                 udelay(40);
2311
2312                 ap->state = ANEG_STATE_IDLE_DETECT;
2313                 ret = ANEG_TIMER_ENAB;
2314                 break;
2315
2316         case ANEG_STATE_IDLE_DETECT:
2317                 if (ap->ability_match != 0 &&
2318                     ap->rxconfig == 0) {
2319                         ap->state = ANEG_STATE_AN_ENABLE;
2320                         break;
2321                 }
2322                 delta = ap->cur_time - ap->link_time;
2323                 if (delta > ANEG_STATE_SETTLE_TIME) {
2324                         /* XXX another gem from the Broadcom driver :( */
2325                         ap->state = ANEG_STATE_LINK_OK;
2326                 }
2327                 break;
2328
2329         case ANEG_STATE_LINK_OK:
2330                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2331                 ret = ANEG_DONE;
2332                 break;
2333
2334         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2335                 /* ??? unimplemented */
2336                 break;
2337
2338         case ANEG_STATE_NEXT_PAGE_WAIT:
2339                 /* ??? unimplemented */
2340                 break;
2341
2342         default:
2343                 ret = ANEG_FAILED;
2344                 break;
2345         }
2346
2347         return ret;
2348 }
2349
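/* Run the software autoneg state machine to completion, bounded by a
 * busy-wait of roughly 195 ms of 1 usec polls.  *flags receives the
 * resulting MR_* bits; returns 1 if negotiation completed successfully.
 */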
2350 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2351 {
2352         int res = 0;
2353         struct tg3_fiber_aneginfo aninfo;
2354         int status = ANEG_FAILED;
2355         unsigned int tick;
2356         u32 tmp;
2357
2358         tw32_f(MAC_TX_AUTO_NEG, 0);
2359
2360         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2361         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2362         udelay(40);
2363
2364         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2365         udelay(40);
2366
2367         memset(&aninfo, 0, sizeof(aninfo));
2368         aninfo.flags |= MR_AN_ENABLE;
2369         aninfo.state = ANEG_STATE_UNKNOWN;
2370         aninfo.cur_time = 0;
2371         tick = 0;
2372         while (++tick < 195000) {
2373                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2374                 if (status == ANEG_DONE || status == ANEG_FAILED)
2375                         break;
2376
2377                 udelay(1);
2378         }
2379
2380         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2381         tw32_f(MAC_MODE, tp->mac_mode);
2382         udelay(40);
2383
2384         *flags = aninfo.flags;
2385
2386         if (status == ANEG_DONE &&
2387             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2388                              MR_LP_ADV_FULL_DUPLEX)))
2389                 res = 1;
2390
2391         return res;
2392 }
2393
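/* Bring-up sequence for the external BCM8002 SerDes PHY: software
 * reset, PLL lock range, POR toggle and channel selection, as noted
 * in the step comments below.
 */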
2394 static void tg3_init_bcm8002(struct tg3 *tp)
2395 {
2396         u32 mac_status = tr32(MAC_STATUS);
2397         int i;
2398
2399         /* Reset when initializing the first time or when we have a link. */
2400         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2401             !(mac_status & MAC_STATUS_PCS_SYNCED))
2402                 return;
2403
2404         /* Set PLL lock range. */
2405         tg3_writephy(tp, 0x16, 0x8007);
2406
2407         /* SW reset */
2408         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2409
2410         /* Wait for reset to complete. */
2411         /* XXX schedule_timeout() ... */
2412         for (i = 0; i < 500; i++)
2413                 udelay(10);
2414
2415         /* Config mode; select PMA/Ch 1 regs. */
2416         tg3_writephy(tp, 0x10, 0x8411);
2417
2418         /* Enable auto-lock and comdet, select txclk for tx. */
2419         tg3_writephy(tp, 0x11, 0x0a10);
2420
2421         tg3_writephy(tp, 0x18, 0x00a0);
2422         tg3_writephy(tp, 0x16, 0x41ff);
2423
2424         /* Assert and deassert POR. */
2425         tg3_writephy(tp, 0x13, 0x0400);
2426         udelay(40);
2427         tg3_writephy(tp, 0x13, 0x0000);
2428
2429         tg3_writephy(tp, 0x11, 0x0a50);
2430         udelay(40);
2431         tg3_writephy(tp, 0x11, 0x0a10);
2432
2433         /* Wait for signal to stabilize */
2434         /* XXX schedule_timeout() ... */
2435         for (i = 0; i < 15000; i++)
2436                 udelay(10);
2437
2438         /* Deselect the channel register so we can read the PHYID
2439          * later.
2440          */
2441         tg3_writephy(tp, 0x10, 0x8011);
2442 }
2443
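/* Drive the SG_DIG hardware autoneg block for fiber ports, falling
 * back to parallel detection when the link partner never sends
 * config codes.  Returns nonzero when the link is up.
 */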
2444 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2445 {
2446         u32 sg_dig_ctrl, sg_dig_status;
2447         u32 serdes_cfg, expected_sg_dig_ctrl;
2448         int workaround, port_a;
2449         int current_link_up;
2450
2451         serdes_cfg = 0;
2452         expected_sg_dig_ctrl = 0;
2453         workaround = 0;
2454         port_a = 1;
2455         current_link_up = 0;
2456
2457         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2458             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2459                 workaround = 1;
2460                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2461                         port_a = 0;
2462
2463                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2464                 /* preserve bits 20-23 for voltage regulator */
2465                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2466         }
2467
2468         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2469
2470         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2471                 if (sg_dig_ctrl & (1 << 31)) {
2472                         if (workaround) {
2473                                 u32 val = serdes_cfg;
2474
2475                                 if (port_a)
2476                                         val |= 0xc010000;
2477                                 else
2478                                         val |= 0x4010000;
2479                                 tw32_f(MAC_SERDES_CFG, val);
2480                         }
2481                         tw32_f(SG_DIG_CTRL, 0x01388400);
2482                 }
2483                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2484                         tg3_setup_flow_control(tp, 0, 0);
2485                         current_link_up = 1;
2486                 }
2487                 goto out;
2488         }
2489
2490         /* Want auto-negotiation.  */
2491         expected_sg_dig_ctrl = 0x81388400;
2492
2493         /* Pause capability */
2494         expected_sg_dig_ctrl |= (1 << 11);
2495
2496         /* Asymmetric pause */
2497         expected_sg_dig_ctrl |= (1 << 12);
2498
2499         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2500                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2501                     tp->serdes_counter &&
2502                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2503                                     MAC_STATUS_RCVD_CFG)) ==
2504                      MAC_STATUS_PCS_SYNCED)) {
2505                         tp->serdes_counter--;
2506                         current_link_up = 1;
2507                         goto out;
2508                 }
2509 restart_autoneg:
2510                 if (workaround)
2511                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2512                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2513                 udelay(5);
2514                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2515
2516                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2517                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2518         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2519                                  MAC_STATUS_SIGNAL_DET)) {
2520                 sg_dig_status = tr32(SG_DIG_STATUS);
2521                 mac_status = tr32(MAC_STATUS);
2522
2523                 if ((sg_dig_status & (1 << 1)) &&
2524                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2525                         u32 local_adv, remote_adv;
2526
2527                         local_adv = ADVERTISE_PAUSE_CAP;
2528                         remote_adv = 0;
2529                         if (sg_dig_status & (1 << 19))
2530                                 remote_adv |= LPA_PAUSE_CAP;
2531                         if (sg_dig_status & (1 << 20))
2532                                 remote_adv |= LPA_PAUSE_ASYM;
2533
2534                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2535                         current_link_up = 1;
2536                         tp->serdes_counter = 0;
2537                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2538                 } else if (!(sg_dig_status & (1 << 1))) {
2539                         if (tp->serdes_counter)
2540                                 tp->serdes_counter--;
2541                         else {
2542                                 if (workaround) {
2543                                         u32 val = serdes_cfg;
2544
2545                                         if (port_a)
2546                                                 val |= 0xc010000;
2547                                         else
2548                                                 val |= 0x4010000;
2549
2550                                         tw32_f(MAC_SERDES_CFG, val);
2551                                 }
2552
2553                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2554                                 udelay(40);
2555
2556                                 /* Link parallel detection: the link is up
2557                                  * only if we have PCS_SYNC and are not
2558                                  * receiving config code words.  */
2559                                 mac_status = tr32(MAC_STATUS);
2560                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2561                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2562                                         tg3_setup_flow_control(tp, 0, 0);
2563                                         current_link_up = 1;
2564                                         tp->tg3_flags2 |=
2565                                                 TG3_FLG2_PARALLEL_DETECT;
2566                                         tp->serdes_counter =
2567                                                 SERDES_PARALLEL_DET_TIMEOUT;
2568                                 } else
2569                                         goto restart_autoneg;
2570                         }
2571                 }
2572         } else {
2573                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2574                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2575         }
2576
2577 out:
2578         return current_link_up;
2579 }
2580
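/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine, or simply force a 1000FD link when autoneg is
 * disabled.  Returns nonzero when the link is up.
 */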
2581 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2582 {
2583         int current_link_up = 0;
2584
2585         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2586                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2587                 goto out;
2588         }
2589
2590         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2591                 u32 flags;
2592                 int i;
2593
2594                 if (fiber_autoneg(tp, &flags)) {
2595                         u32 local_adv, remote_adv;
2596
2597                         local_adv = ADVERTISE_PAUSE_CAP;
2598                         remote_adv = 0;
2599                         if (flags & MR_LP_ADV_SYM_PAUSE)
2600                                 remote_adv |= LPA_PAUSE_CAP;
2601                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2602                                 remote_adv |= LPA_PAUSE_ASYM;
2603
2604                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2605
2606                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2607                         current_link_up = 1;
2608                 }
2609                 for (i = 0; i < 30; i++) {
2610                         udelay(20);
2611                         tw32_f(MAC_STATUS,
2612                                (MAC_STATUS_SYNC_CHANGED |
2613                                 MAC_STATUS_CFG_CHANGED));
2614                         udelay(40);
2615                         if ((tr32(MAC_STATUS) &
2616                              (MAC_STATUS_SYNC_CHANGED |
2617                               MAC_STATUS_CFG_CHANGED)) == 0)
2618                                 break;
2619                 }
2620
2621                 mac_status = tr32(MAC_STATUS);
2622                 if (current_link_up == 0 &&
2623                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2624                     !(mac_status & MAC_STATUS_RCVD_CFG))
2625                         current_link_up = 1;
2626         } else {
2627                 /* Forcing 1000FD link up. */
2628                 current_link_up = 1;
2629                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2630
2631                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2632                 udelay(40);
2633         }
2634
2635 out:
2636         return current_link_up;
2637 }
2638
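/* Top-level link setup for TBI/fiber ports: select hardware or
 * software autoneg, update MAC_MODE and the link LED, and report any
 * change in link state or flow control.
 */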
2639 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2640 {
2641         u32 orig_pause_cfg;
2642         u16 orig_active_speed;
2643         u8 orig_active_duplex;
2644         u32 mac_status;
2645         int current_link_up;
2646         int i;
2647
2648         orig_pause_cfg =
2649                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2650                                   TG3_FLAG_TX_PAUSE));
2651         orig_active_speed = tp->link_config.active_speed;
2652         orig_active_duplex = tp->link_config.active_duplex;
2653
2654         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2655             netif_carrier_ok(tp->dev) &&
2656             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2657                 mac_status = tr32(MAC_STATUS);
2658                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2659                                MAC_STATUS_SIGNAL_DET |
2660                                MAC_STATUS_CFG_CHANGED |
2661                                MAC_STATUS_RCVD_CFG);
2662                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2663                                    MAC_STATUS_SIGNAL_DET)) {
2664                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2665                                             MAC_STATUS_CFG_CHANGED));
2666                         return 0;
2667                 }
2668         }
2669
2670         tw32_f(MAC_TX_AUTO_NEG, 0);
2671
2672         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2673         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2674         tw32_f(MAC_MODE, tp->mac_mode);
2675         udelay(40);
2676
2677         if (tp->phy_id == PHY_ID_BCM8002)
2678                 tg3_init_bcm8002(tp);
2679
2680         /* Enable link change event even when serdes polling.  */
2681         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2682         udelay(40);
2683
2684         current_link_up = 0;
2685         mac_status = tr32(MAC_STATUS);
2686
2687         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2688                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2689         else
2690                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2691
2692         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2693         tw32_f(MAC_MODE, tp->mac_mode);
2694         udelay(40);
2695
2696         tp->hw_status->status =
2697                 (SD_STATUS_UPDATED |
2698                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2699
2700         for (i = 0; i < 100; i++) {
2701                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2702                                     MAC_STATUS_CFG_CHANGED));
2703                 udelay(5);
2704                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2705                                          MAC_STATUS_CFG_CHANGED |
2706                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2707                         break;
2708         }
2709
2710         mac_status = tr32(MAC_STATUS);
2711         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2712                 current_link_up = 0;
2713                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2714                     tp->serdes_counter == 0) {
2715                         tw32_f(MAC_MODE, (tp->mac_mode |
2716                                           MAC_MODE_SEND_CONFIGS));
2717                         udelay(1);
2718                         tw32_f(MAC_MODE, tp->mac_mode);
2719                 }
2720         }
2721
2722         if (current_link_up == 1) {
2723                 tp->link_config.active_speed = SPEED_1000;
2724                 tp->link_config.active_duplex = DUPLEX_FULL;
2725                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2726                                     LED_CTRL_LNKLED_OVERRIDE |
2727                                     LED_CTRL_1000MBPS_ON));
2728         } else {
2729                 tp->link_config.active_speed = SPEED_INVALID;
2730                 tp->link_config.active_duplex = DUPLEX_INVALID;
2731                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2732                                     LED_CTRL_LNKLED_OVERRIDE |
2733                                     LED_CTRL_TRAFFIC_OVERRIDE));
2734         }
2735
2736         if (current_link_up != netif_carrier_ok(tp->dev)) {
2737                 if (current_link_up)
2738                         netif_carrier_on(tp->dev);
2739                 else
2740                         netif_carrier_off(tp->dev);
2741                 tg3_link_report(tp);
2742         } else {
2743                 u32 now_pause_cfg =
2744                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2745                                          TG3_FLAG_TX_PAUSE);
2746                 if (orig_pause_cfg != now_pause_cfg ||
2747                     orig_active_speed != tp->link_config.active_speed ||
2748                     orig_active_duplex != tp->link_config.active_duplex)
2749                         tg3_link_report(tp);
2750         }
2751
2752         return 0;
2753 }
2754
2755 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2756 {
2757         int current_link_up, err = 0;
2758         u32 bmsr, bmcr;
2759         u16 current_speed;
2760         u8 current_duplex;
2761
2762         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2763         tw32_f(MAC_MODE, tp->mac_mode);
2764         udelay(40);
2765
2766         tw32(MAC_EVENT, 0);
2767
2768         tw32_f(MAC_STATUS,
2769              (MAC_STATUS_SYNC_CHANGED |
2770               MAC_STATUS_CFG_CHANGED |
2771               MAC_STATUS_MI_COMPLETION |
2772               MAC_STATUS_LNKSTATE_CHANGED));
2773         udelay(40);
2774
2775         if (force_reset)
2776                 tg3_phy_reset(tp);
2777
2778         current_link_up = 0;
2779         current_speed = SPEED_INVALID;
2780         current_duplex = DUPLEX_INVALID;
2781
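        /* Note: MII_BMSR latches link-down events, so it is read twice here;
         * the first read clears any stale latched status and the second
         * reflects the current link state.
         */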
2782         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2783         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2785                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2786                         bmsr |= BMSR_LSTATUS;
2787                 else
2788                         bmsr &= ~BMSR_LSTATUS;
2789         }
2790
2791         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2792
2793         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2794             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2795                 /* do nothing, just check for link up at the end */
2796         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2797                 u32 adv, new_adv;
2798
2799                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2800                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2801                                   ADVERTISE_1000XPAUSE |
2802                                   ADVERTISE_1000XPSE_ASYM |
2803                                   ADVERTISE_SLCT);
2804
2805                 /* Always advertise symmetric PAUSE just like copper */
2806                 new_adv |= ADVERTISE_1000XPAUSE;
2807
2808                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2809                         new_adv |= ADVERTISE_1000XHALF;
2810                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2811                         new_adv |= ADVERTISE_1000XFULL;
2812
2813                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2814                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2815                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2816                         tg3_writephy(tp, MII_BMCR, bmcr);
2817
2818                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2819                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2820                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2821
2822                         return err;
2823                 }
2824         } else {
2825                 u32 new_bmcr;
2826
2827                 bmcr &= ~BMCR_SPEED1000;
2828                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2829
2830                 if (tp->link_config.duplex == DUPLEX_FULL)
2831                         new_bmcr |= BMCR_FULLDPLX;
2832
2833                 if (new_bmcr != bmcr) {
2834                         /* BMCR_SPEED1000 is a reserved bit that needs
2835                          * to be set on write.
2836                          */
2837                         new_bmcr |= BMCR_SPEED1000;
2838
2839                         /* Force a linkdown */
2840                         if (netif_carrier_ok(tp->dev)) {
2841                                 u32 adv;
2842
2843                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2844                                 adv &= ~(ADVERTISE_1000XFULL |
2845                                          ADVERTISE_1000XHALF |
2846                                          ADVERTISE_SLCT);
2847                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2848                                 tg3_writephy(tp, MII_BMCR, bmcr |
2849                                                            BMCR_ANRESTART |
2850                                                            BMCR_ANENABLE);
2851                                 udelay(10);
2852                                 netif_carrier_off(tp->dev);
2853                         }
2854                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2855                         bmcr = new_bmcr;
2856                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2857                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2858                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2859                             ASIC_REV_5714) {
2860                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2861                                         bmsr |= BMSR_LSTATUS;
2862                                 else
2863                                         bmsr &= ~BMSR_LSTATUS;
2864                         }
2865                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2866                 }
2867         }
2868
2869         if (bmsr & BMSR_LSTATUS) {
2870                 current_speed = SPEED_1000;
2871                 current_link_up = 1;
2872                 if (bmcr & BMCR_FULLDPLX)
2873                         current_duplex = DUPLEX_FULL;
2874                 else
2875                         current_duplex = DUPLEX_HALF;
2876
2877                 if (bmcr & BMCR_ANENABLE) {
2878                         u32 local_adv, remote_adv, common;
2879
2880                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2881                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2882                         common = local_adv & remote_adv;
2883                         if (common & (ADVERTISE_1000XHALF |
2884                                       ADVERTISE_1000XFULL)) {
2885                                 if (common & ADVERTISE_1000XFULL)
2886                                         current_duplex = DUPLEX_FULL;
2887                                 else
2888                                         current_duplex = DUPLEX_HALF;
2889
2890                                 tg3_setup_flow_control(tp, local_adv,
2891                                                        remote_adv);
2892                         }
2893                         else
2894                                 current_link_up = 0;
2895                 }
2896         }
2897
2898         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2899         if (tp->link_config.active_duplex == DUPLEX_HALF)
2900                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2901
2902         tw32_f(MAC_MODE, tp->mac_mode);
2903         udelay(40);
2904
2905         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2906
2907         tp->link_config.active_speed = current_speed;
2908         tp->link_config.active_duplex = current_duplex;
2909
2910         if (current_link_up != netif_carrier_ok(tp->dev)) {
2911                 if (current_link_up)
2912                         netif_carrier_on(tp->dev);
2913                 else {
2914                         netif_carrier_off(tp->dev);
2915                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2916                 }
2917                 tg3_link_report(tp);
2918         }
2919         return err;
2920 }
2921
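/* Parallel detection fallback for serdes links: once the autoneg timer
 * (tp->serdes_counter) has run out with the link still down, check whether
 * the PHY reports signal detect without incoming config code words and, if
 * so, force 1000/full.  If config code words show up again later, autoneg
 * is re-enabled.
 */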
2922 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2923 {
2924         if (tp->serdes_counter) {
2925                 /* Give autoneg time to complete. */
2926                 tp->serdes_counter--;
2927                 return;
2928         }
2929         if (!netif_carrier_ok(tp->dev) &&
2930             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2931                 u32 bmcr;
2932
2933                 tg3_readphy(tp, MII_BMCR, &bmcr);
2934                 if (bmcr & BMCR_ANENABLE) {
2935                         u32 phy1, phy2;
2936
2937                         /* Select shadow register 0x1f */
2938                         tg3_writephy(tp, 0x1c, 0x7c00);
2939                         tg3_readphy(tp, 0x1c, &phy1);
2940
2941                         /* Select expansion interrupt status register */
2942                         tg3_writephy(tp, 0x17, 0x0f01);
2943                         tg3_readphy(tp, 0x15, &phy2);
2944                         tg3_readphy(tp, 0x15, &phy2);
2945
2946                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2947                                 /* We have signal detect and are not
2948                                  * receiving config code words, so the link
2949                                  * is up by parallel detection.
2950                                  */
2951
2952                                 bmcr &= ~BMCR_ANENABLE;
2953                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2954                                 tg3_writephy(tp, MII_BMCR, bmcr);
2955                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2956                         }
2957                 }
2958         }
2959         else if (netif_carrier_ok(tp->dev) &&
2960                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2961                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2962                 u32 phy2;
2963
2964                 /* Select expansion interrupt status register */
2965                 tg3_writephy(tp, 0x17, 0x0f01);
2966                 tg3_readphy(tp, 0x15, &phy2);
2967                 if (phy2 & 0x20) {
2968                         u32 bmcr;
2969
2970                         /* Config code words received, turn on autoneg. */
2971                         tg3_readphy(tp, MII_BMCR, &bmcr);
2972                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2973
2974                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2975
2976                 }
2977         }
2978 }
2979
2980 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2981 {
2982         int err;
2983
2984         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2985                 err = tg3_setup_fiber_phy(tp, force_reset);
2986         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2987                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2988         } else {
2989                 err = tg3_setup_copper_phy(tp, force_reset);
2990         }
2991
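        /* 1000 Mb/s half duplex uses carrier extension, which presumably is
         * why a much larger slot time (0xff rather than 32) is programmed
         * into MAC_TX_LENGTHS below; the units of the slot time field are
         * hardware defined.
         */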
2992         if (tp->link_config.active_speed == SPEED_1000 &&
2993             tp->link_config.active_duplex == DUPLEX_HALF)
2994                 tw32(MAC_TX_LENGTHS,
2995                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2996                       (6 << TX_LENGTHS_IPG_SHIFT) |
2997                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2998         else
2999                 tw32(MAC_TX_LENGTHS,
3000                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3001                       (6 << TX_LENGTHS_IPG_SHIFT) |
3002                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3003
3004         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3005                 if (netif_carrier_ok(tp->dev)) {
3006                         tw32(HOSTCC_STAT_COAL_TICKS,
3007                              tp->coal.stats_block_coalesce_usecs);
3008                 } else {
3009                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3010                 }
3011         }
3012
3013         return err;
3014 }
3015
3016 /* This is called whenever we suspect that the system chipset is re-
3017  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3018  * is bogus tx completions. We try to recover by setting the
3019  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3020  * in the workqueue.
3021  */
3022 static void tg3_tx_recover(struct tg3 *tp)
3023 {
3024         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3025                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3026
3027         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3028                "mapped I/O cycles to the network device, attempting to "
3029                "recover. Please report the problem to the driver maintainer "
3030                "and include system chipset information.\n", tp->dev->name);
3031
3032         spin_lock(&tp->lock);
3033         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3034         spin_unlock(&tp->lock);
3035 }
3036
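/* Illustrative example (assuming TG3_TX_RING_SIZE == 512): with
 * tx_prod == 10 and tx_cons == 500, the number of descriptors in flight is
 * (10 - 500) & 511 == 22, so tg3_tx_avail() returns tp->tx_pending - 22.
 */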
3037 static inline u32 tg3_tx_avail(struct tg3 *tp)
3038 {
3039         smp_mb();
3040         return (tp->tx_pending -
3041                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3042 }
3043
3044 /* Tigon3 never reports partial packet sends.  So we do not
3045  * need special logic to handle SKBs that have not had all
3046  * of their frags sent yet, like SunGEM does.
3047  */
3048 static void tg3_tx(struct tg3 *tp)
3049 {
3050         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3051         u32 sw_idx = tp->tx_cons;
3052
3053         while (sw_idx != hw_idx) {
3054                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3055                 struct sk_buff *skb = ri->skb;
3056                 int i, tx_bug = 0;
3057
3058                 if (unlikely(skb == NULL)) {
3059                         tg3_tx_recover(tp);
3060                         return;
3061                 }
3062
3063                 pci_unmap_single(tp->pdev,
3064                                  pci_unmap_addr(ri, mapping),
3065                                  skb_headlen(skb),
3066                                  PCI_DMA_TODEVICE);
3067
3068                 ri->skb = NULL;
3069
3070                 sw_idx = NEXT_TX(sw_idx);
3071
3072                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3073                         ri = &tp->tx_buffers[sw_idx];
3074                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3075                                 tx_bug = 1;
3076
3077                         pci_unmap_page(tp->pdev,
3078                                        pci_unmap_addr(ri, mapping),
3079                                        skb_shinfo(skb)->frags[i].size,
3080                                        PCI_DMA_TODEVICE);
3081
3082                         sw_idx = NEXT_TX(sw_idx);
3083                 }
3084
3085                 dev_kfree_skb(skb);
3086
3087                 if (unlikely(tx_bug)) {
3088                         tg3_tx_recover(tp);
3089                         return;
3090                 }
3091         }
3092
3093         tp->tx_cons = sw_idx;
3094
3095         /* Need to make the tx_cons update visible to tg3_start_xmit()
3096          * before checking for netif_queue_stopped().  Without the
3097          * memory barrier, there is a small possibility that tg3_start_xmit()
3098          * will miss it and cause the queue to be stopped forever.
3099          */
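        /* This barrier pairs with the smp_mb() at the top of tg3_tx_avail(),
         * which tg3_start_xmit() uses when deciding whether to stop or wake
         * the queue.
         */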
3100         smp_mb();
3101
3102         if (unlikely(netif_queue_stopped(tp->dev) &&
3103                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3104                 netif_tx_lock(tp->dev);
3105                 if (netif_queue_stopped(tp->dev) &&
3106                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3107                         netif_wake_queue(tp->dev);
3108                 netif_tx_unlock(tp->dev);
3109         }
3110 }
3111
3112 /* Returns size of skb allocated or < 0 on error.
3113  *
3114  * We only need to fill in the address because the other members
3115  * of the RX descriptor are invariant, see tg3_init_rings.
3116  *
3117  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3118  * posting buffers we only dirty the first cache line of the RX
3119  * descriptor (containing the address).  Whereas for the RX status
3120  * buffers the cpu only reads the last cacheline of the RX descriptor
3121  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3122  */
3123 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3124                             int src_idx, u32 dest_idx_unmasked)
3125 {
3126         struct tg3_rx_buffer_desc *desc;
3127         struct ring_info *map, *src_map;
3128         struct sk_buff *skb;
3129         dma_addr_t mapping;
3130         int skb_size, dest_idx;
3131
3132         src_map = NULL;
3133         switch (opaque_key) {
3134         case RXD_OPAQUE_RING_STD:
3135                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3136                 desc = &tp->rx_std[dest_idx];
3137                 map = &tp->rx_std_buffers[dest_idx];
3138                 if (src_idx >= 0)
3139                         src_map = &tp->rx_std_buffers[src_idx];
3140                 skb_size = tp->rx_pkt_buf_sz;
3141                 break;
3142
3143         case RXD_OPAQUE_RING_JUMBO:
3144                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3145                 desc = &tp->rx_jumbo[dest_idx];
3146                 map = &tp->rx_jumbo_buffers[dest_idx];
3147                 if (src_idx >= 0)
3148                         src_map = &tp->rx_jumbo_buffers[src_idx];
3149                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3150                 break;
3151
3152         default:
3153                 return -EINVAL;
3154         }
3155
3156         /* Do not overwrite any of the map or descriptor information
3157          * until we are sure we can commit to a new buffer.
3158          *
3159          * Callers depend upon this behavior and assume that
3160          * we leave everything unchanged if we fail.
3161          */
3162         skb = netdev_alloc_skb(tp->dev, skb_size);
3163         if (skb == NULL)
3164                 return -ENOMEM;
3165
3166         skb_reserve(skb, tp->rx_offset);
3167
3168         mapping = pci_map_single(tp->pdev, skb->data,
3169                                  skb_size - tp->rx_offset,
3170                                  PCI_DMA_FROMDEVICE);
3171
3172         map->skb = skb;
3173         pci_unmap_addr_set(map, mapping, mapping);
3174
3175         if (src_map != NULL)
3176                 src_map->skb = NULL;
3177
3178         desc->addr_hi = ((u64)mapping >> 32);
3179         desc->addr_lo = ((u64)mapping & 0xffffffff);
3180
3181         return skb_size;
3182 }
3183
3184 /* We only need to move over in the address because the other
3185  * members of the RX descriptor are invariant.  See notes above
3186  * tg3_alloc_rx_skb for full details.
3187  */
3188 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3189                            int src_idx, u32 dest_idx_unmasked)
3190 {
3191         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3192         struct ring_info *src_map, *dest_map;
3193         int dest_idx;
3194
3195         switch (opaque_key) {
3196         case RXD_OPAQUE_RING_STD:
3197                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3198                 dest_desc = &tp->rx_std[dest_idx];
3199                 dest_map = &tp->rx_std_buffers[dest_idx];
3200                 src_desc = &tp->rx_std[src_idx];
3201                 src_map = &tp->rx_std_buffers[src_idx];
3202                 break;
3203
3204         case RXD_OPAQUE_RING_JUMBO:
3205                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3206                 dest_desc = &tp->rx_jumbo[dest_idx];
3207                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3208                 src_desc = &tp->rx_jumbo[src_idx];
3209                 src_map = &tp->rx_jumbo_buffers[src_idx];
3210                 break;
3211
3212         default:
3213                 return;
3214         }
3215
3216         dest_map->skb = src_map->skb;
3217         pci_unmap_addr_set(dest_map, mapping,
3218                            pci_unmap_addr(src_map, mapping));
3219         dest_desc->addr_hi = src_desc->addr_hi;
3220         dest_desc->addr_lo = src_desc->addr_lo;
3221
3222         src_map->skb = NULL;
3223 }
3224
3225 #if TG3_VLAN_TAG_USED
3226 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3227 {
3228         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3229 }
3230 #endif
3231
3232 /* The RX ring scheme is composed of multiple rings which post fresh
3233  * buffers to the chip, and one special ring the chip uses to report
3234  * status back to the host.
3235  *
3236  * The special ring reports the status of received packets to the
3237  * host.  The chip does not write into the original descriptor the
3238  * RX buffer was obtained from.  The chip simply takes the original
3239  * descriptor as provided by the host, updates the status and length
3240  * field, then writes this into the next status ring entry.
3241  *
3242  * Each ring the host uses to post buffers to the chip is described
3243  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3244  * it is first placed into the on-chip ram.  When the packet's length
3245  * is known, it walks down the TG3_BDINFO entries to select the ring.
3246  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3247  * whose MAXLEN covers the new packet's length is chosen.
3248  *
3249  * The "separate ring for rx status" scheme may sound queer, but it makes
3250  * sense from a cache coherency perspective.  If only the host writes
3251  * to the buffer post rings, and only the chip writes to the rx status
3252  * rings, then cache lines never move beyond shared-modified state.
3253  * If both the host and chip were to write into the same ring, cache line
3254  * eviction could occur since both entities want it in an exclusive state.
3255  */
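/* In tg3_rx() below, sw_idx (tp->rx_rcb_ptr) is the host's consumer index
 * into the status (return) ring, hw_idx is the chip's producer index taken
 * from the status block, and tp->rx_std_ptr / tp->rx_jumbo_ptr are the
 * host's producer indices for the buffer-post rings, written back to the
 * chip through the receive producer mailboxes.
 */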
3256 static int tg3_rx(struct tg3 *tp, int budget)
3257 {
3258         u32 work_mask, rx_std_posted = 0;
3259         u32 sw_idx = tp->rx_rcb_ptr;
3260         u16 hw_idx;
3261         int received;
3262
3263         hw_idx = tp->hw_status->idx[0].rx_producer;
3264         /*
3265          * We need to order the read of hw_idx and the read of
3266          * the opaque cookie.
3267          */
3268         rmb();
3269         work_mask = 0;
3270         received = 0;
3271         while (sw_idx != hw_idx && budget > 0) {
3272                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3273                 unsigned int len;
3274                 struct sk_buff *skb;
3275                 dma_addr_t dma_addr;
3276                 u32 opaque_key, desc_idx, *post_ptr;
3277
3278                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3279                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3280                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3281                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3282                                                   mapping);
3283                         skb = tp->rx_std_buffers[desc_idx].skb;
3284                         post_ptr = &tp->rx_std_ptr;
3285                         rx_std_posted++;
3286                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3287                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3288                                                   mapping);
3289                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3290                         post_ptr = &tp->rx_jumbo_ptr;
3291                 }
3292                 else {
3293                         goto next_pkt_nopost;
3294                 }
3295
3296                 work_mask |= opaque_key;
3297
3298                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3299                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3300                 drop_it:
3301                         tg3_recycle_rx(tp, opaque_key,
3302                                        desc_idx, *post_ptr);
3303                 drop_it_no_recycle:
3304                         /* Other statistics kept track of by card. */
3305                         tp->net_stats.rx_dropped++;
3306                         goto next_pkt;
3307                 }
3308
3309                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3310
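                /* Buffer strategy: frames larger than RX_COPY_THRESHOLD are
                 * handed up in the original ring buffer and a replacement
                 * buffer is allocated; smaller frames are copied into a
                 * fresh skb and the original buffer is recycled back onto
                 * the ring.
                 */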
3311                 if (len > RX_COPY_THRESHOLD
3312                         && tp->rx_offset == 2
3313                         /* rx_offset != 2 iff this is a 5701 card running
3314                          * in PCI-X mode [see tg3_get_invariants()] */
3315                 ) {
3316                         int skb_size;
3317
3318                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3319                                                     desc_idx, *post_ptr);
3320                         if (skb_size < 0)
3321                                 goto drop_it;
3322
3323                         pci_unmap_single(tp->pdev, dma_addr,
3324                                          skb_size - tp->rx_offset,
3325                                          PCI_DMA_FROMDEVICE);
3326
3327                         skb_put(skb, len);
3328                 } else {
3329                         struct sk_buff *copy_skb;
3330
3331                         tg3_recycle_rx(tp, opaque_key,
3332                                        desc_idx, *post_ptr);
3333
3334                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3335                         if (copy_skb == NULL)
3336                                 goto drop_it_no_recycle;
3337
3338                         skb_reserve(copy_skb, 2);
3339                         skb_put(copy_skb, len);
3340                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3341                         memcpy(copy_skb->data, skb->data, len);
3342                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3343
3344                         /* We'll reuse the original ring buffer. */
3345                         skb = copy_skb;
3346                 }
3347
3348                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3349                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3350                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3351                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3352                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3353                 else
3354                         skb->ip_summed = CHECKSUM_NONE;
3355
3356                 skb->protocol = eth_type_trans(skb, tp->dev);
3357 #if TG3_VLAN_TAG_USED
3358                 if (tp->vlgrp != NULL &&
3359                     desc->type_flags & RXD_FLAG_VLAN) {
3360                         tg3_vlan_rx(tp, skb,
3361                                     desc->err_vlan & RXD_VLAN_MASK);
3362                 } else
3363 #endif
3364                         netif_receive_skb(skb);
3365
3366                 tp->dev->last_rx = jiffies;
3367                 received++;
3368                 budget--;
3369
3370 next_pkt:
3371                 (*post_ptr)++;
3372
3373                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3374                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3375
3376                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3377                                      TG3_64BIT_REG_LOW, idx);
3378                         work_mask &= ~RXD_OPAQUE_RING_STD;
3379                         rx_std_posted = 0;
3380                 }
3381 next_pkt_nopost:
3382                 sw_idx++;
3383                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3384
3385                 /* Refresh hw_idx to see if there is new work */
3386                 if (sw_idx == hw_idx) {
3387                         hw_idx = tp->hw_status->idx[0].rx_producer;
3388                         rmb();
3389                 }
3390         }
3391
3392         /* ACK the status ring. */
3393         tp->rx_rcb_ptr = sw_idx;
3394         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3395
3396         /* Refill RX ring(s). */
3397         if (work_mask & RXD_OPAQUE_RING_STD) {
3398                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3399                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3400                              sw_idx);
3401         }
3402         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3403                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3404                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3405                              sw_idx);
3406         }
3407         mmiowb();
3408
3409         return received;
3410 }
3411
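/* The ->poll() contract used here: *budget is the remaining quota for this
 * softirq pass and netdev->quota is the per-device allowance; both are
 * decremented by the amount of RX work done.  Returning 0 means all work is
 * complete and netif_rx_complete() has been called, returning 1 asks to be
 * polled again.
 */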
3412 static int tg3_poll(struct net_device *netdev, int *budget)
3413 {
3414         struct tg3 *tp = netdev_priv(netdev);
3415         struct tg3_hw_status *sblk = tp->hw_status;
3416         int done;
3417
3418         /* handle link change and other phy events */
3419         if (!(tp->tg3_flags &
3420               (TG3_FLAG_USE_LINKCHG_REG |
3421                TG3_FLAG_POLL_SERDES))) {
3422                 if (sblk->status & SD_STATUS_LINK_CHG) {
3423                         sblk->status = SD_STATUS_UPDATED |
3424                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3425                         spin_lock(&tp->lock);
3426                         tg3_setup_phy(tp, 0);
3427                         spin_unlock(&tp->lock);
3428                 }
3429         }
3430
3431         /* run TX completion thread */
3432         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3433                 tg3_tx(tp);
3434                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3435                         netif_rx_complete(netdev);
3436                         schedule_work(&tp->reset_task);
3437                         return 0;
3438                 }
3439         }
3440
3441         /* run RX thread, within the bounds set by NAPI.
3442          * All RX "locking" is done by ensuring outside
3443          * code synchronizes with dev->poll()
3444          */
3445         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3446                 int orig_budget = *budget;
3447                 int work_done;
3448
3449                 if (orig_budget > netdev->quota)
3450                         orig_budget = netdev->quota;
3451
3452                 work_done = tg3_rx(tp, orig_budget);
3453
3454                 *budget -= work_done;
3455                 netdev->quota -= work_done;
3456         }
3457
3458         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3459                 tp->last_tag = sblk->status_tag;
3460                 rmb();
3461         } else
3462                 sblk->status &= ~SD_STATUS_UPDATED;
3463
3464         /* if no more work, tell net stack and NIC we're done */
3465         done = !tg3_has_work(tp);
3466         if (done) {
3467                 netif_rx_complete(netdev);
3468                 tg3_restart_ints(tp);
3469         }
3470
3471         return (done ? 0 : 1);
3472 }
3473
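/* Quiesce interrupt handling: after this returns, any handler that was
 * already running has finished (synchronize_irq()), and new invocations see
 * irq_sync != 0 via tg3_irq_sync() and bail out before scheduling NAPI.
 */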
3474 static void tg3_irq_quiesce(struct tg3 *tp)
3475 {
3476         BUG_ON(tp->irq_sync);
3477
3478         tp->irq_sync = 1;
3479         smp_mb();
3480
3481         synchronize_irq(tp->pdev->irq);
3482 }
3483
3484 static inline int tg3_irq_sync(struct tg3 *tp)
3485 {
3486         return tp->irq_sync;
3487 }
3488
3489 /* Fully shut down all tg3 driver activity elsewhere in the system.
3490  * If irq_sync is non-zero, the IRQ handler is quiesced as well.
3491  * Most of the time this is not necessary, except when shutting
3492  * down the device.
3493  */
3494 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3495 {
3496         if (irq_sync)
3497                 tg3_irq_quiesce(tp);
3498         spin_lock_bh(&tp->lock);
3499 }
3500
3501 static inline void tg3_full_unlock(struct tg3 *tp)
3502 {
3503         spin_unlock_bh(&tp->lock);
3504 }
3505
3506 /* One-shot MSI handler - the chip automatically disables the interrupt
3507  * after sending the MSI, so the driver doesn't have to do it.
3508  */
3509 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3510 {
3511         struct net_device *dev = dev_id;
3512         struct tg3 *tp = netdev_priv(dev);
3513
3514         prefetch(tp->hw_status);
3515         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3516
3517         if (likely(!tg3_irq_sync(tp)))
3518                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3519
3520         return IRQ_HANDLED;
3521 }
3522
3523 /* MSI ISR - No need to check for interrupt sharing and no need to
3524  * flush status block and interrupt mailbox. PCI ordering rules
3525  * guarantee that MSI will arrive after the status block.
3526  */
3527 static irqreturn_t tg3_msi(int irq, void *dev_id)
3528 {
3529         struct net_device *dev = dev_id;
3530         struct tg3 *tp = netdev_priv(dev);
3531
3532         prefetch(tp->hw_status);
3533         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3534         /*
3535          * Writing any value to intr-mbox-0 clears PCI INTA# and
3536          * chip-internal interrupt pending events.
3537          * Writing non-zero to intr-mbox-0 additionally tells the
3538          * NIC to stop sending us irqs, engaging "in-intr-handler"
3539          * event coalescing.
3540          */
3541         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3542         if (likely(!tg3_irq_sync(tp)))
3543                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3544
3545         return IRQ_RETVAL(1);
3546 }
3547
3548 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3549 {
3550         struct net_device *dev = dev_id;
3551         struct tg3 *tp = netdev_priv(dev);
3552         struct tg3_hw_status *sblk = tp->hw_status;
3553         unsigned int handled = 1;
3554
3555         /* In INTx mode, it is possible for the interrupt to arrive at
3556          * the CPU before the status block that was posted just prior to
3557          * the interrupt.  Reading the PCI State register will confirm
3558          * whether the interrupt is ours and will flush the status block.
3559          */
3560         if ((sblk->status & SD_STATUS_UPDATED) ||
3561             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3562                 /*
3563                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3564                  * chip-internal interrupt pending events.
3565                  * Writing non-zero to intr-mbox-0 additionally tells the
3566                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3567                  * event coalescing.
3568                  */
3569                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3570                              0x00000001);
3571                 if (tg3_irq_sync(tp))
3572                         goto out;
3573                 sblk->status &= ~SD_STATUS_UPDATED;
3574                 if (likely(tg3_has_work(tp))) {
3575                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3576                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3577                 } else {
3578                         /* No work, shared interrupt perhaps?  re-enable
3579                          * interrupts, and flush that PCI write
3580                          */
3581                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3582                                 0x00000000);
3583                 }
3584         } else {        /* shared interrupt */
3585                 handled = 0;
3586         }
3587 out:
3588         return IRQ_RETVAL(handled);
3589 }
3590
3591 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3592 {
3593         struct net_device *dev = dev_id;
3594         struct tg3 *tp = netdev_priv(dev);
3595         struct tg3_hw_status *sblk = tp->hw_status;
3596         unsigned int handled = 1;
3597
3598         /* In INTx mode, it is possible for the interrupt to arrive at
3599          * the CPU before the status block that was posted just prior to
3600          * the interrupt.  Reading the PCI State register will confirm
3601          * whether the interrupt is ours and will flush the status block.
3602          */
3603         if ((sblk->status_tag != tp->last_tag) ||
3604             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3605                 /*
3606                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3607                  * chip-internal interrupt pending events.
3608                  * Writing non-zero to intr-mbox-0 additionally tells the
3609                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3610                  * event coalescing.
3611                  */
3612                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3613                              0x00000001);
3614                 if (tg3_irq_sync(tp))
3615                         goto out;
3616                 if (netif_rx_schedule_prep(dev)) {
3617                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3618                         /* Update last_tag to mark that this status has been
3619                          * seen. Because interrupt may be shared, we may be
3620                          * racing with tg3_poll(), so only update last_tag
3621                          * if tg3_poll() is not scheduled.
3622                          */
3623                         tp->last_tag = sblk->status_tag;
3624                         __netif_rx_schedule(dev);
3625                 }
3626         } else {        /* shared interrupt */
3627                 handled = 0;
3628         }
3629 out:
3630         return IRQ_RETVAL(handled);
3631 }
3632
3633 /* ISR for interrupt test */
3634 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3635 {
3636         struct net_device *dev = dev_id;
3637         struct tg3 *tp = netdev_priv(dev);
3638         struct tg3_hw_status *sblk = tp->hw_status;
3639
3640         if ((sblk->status & SD_STATUS_UPDATED) ||
3641             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3642                 tg3_disable_ints(tp);
3643                 return IRQ_RETVAL(1);
3644         }
3645         return IRQ_RETVAL(0);
3646 }
3647
3648 static int tg3_init_hw(struct tg3 *, int);
3649 static int tg3_halt(struct tg3 *, int, int);
3650
3651 /* Restart hardware after configuration changes, self-test, etc.
3652  * Invoked with tp->lock held.
3653  */
3654 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3655 {
3656         int err;
3657
3658         err = tg3_init_hw(tp, reset_phy);
3659         if (err) {
3660                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3661                        "aborting.\n", tp->dev->name);
3662                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3663                 tg3_full_unlock(tp);
3664                 del_timer_sync(&tp->timer);
3665                 tp->irq_sync = 0;
3666                 netif_poll_enable(tp->dev);
3667                 dev_close(tp->dev);
3668                 tg3_full_lock(tp, 0);
3669         }
3670         return err;
3671 }
3672
3673 #ifdef CONFIG_NET_POLL_CONTROLLER
3674 static void tg3_poll_controller(struct net_device *dev)
3675 {
3676         struct tg3 *tp = netdev_priv(dev);
3677
3678         tg3_interrupt(tp->pdev->irq, dev);
3679 }
3680 #endif
3681
3682 static void tg3_reset_task(struct work_struct *work)
3683 {
3684         struct tg3 *tp = container_of(work, struct tg3, reset_task);
3685         unsigned int restart_timer;
3686
3687         tg3_full_lock(tp, 0);
3688         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3689
3690         if (!netif_running(tp->dev)) {
3691                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3692                 tg3_full_unlock(tp);
3693                 return;
3694         }
3695
3696         tg3_full_unlock(tp);
3697
3698         tg3_netif_stop(tp);
3699
3700         tg3_full_lock(tp, 1);
3701
3702         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3703         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3704
3705         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3706                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3707                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3708                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3709                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3710         }
3711
3712         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3713         if (tg3_init_hw(tp, 1))
3714                 goto out;
3715
3716         tg3_netif_start(tp);
3717
3718         if (restart_timer)
3719                 mod_timer(&tp->timer, jiffies + 1);
3720
3721 out:
3722         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723
3724         tg3_full_unlock(tp);
3725 }
3726
3727 static void tg3_tx_timeout(struct net_device *dev)
3728 {
3729         struct tg3 *tp = netdev_priv(dev);
3730
3731         if (netif_msg_tx_err(tp))
3732                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3733                        dev->name);
3734
3735         schedule_work(&tp->reset_task);
3736 }
3737
3738 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
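/* Worked example: base == 0xffffff00 with len == 0x200 gives
 * base + len + 8 == 0x108 after 32-bit wraparound, which is < base, so the
 * buffer straddles a 4GB boundary.  The base > 0xffffdcc0 pre-check is just
 * a fast filter: only addresses within roughly 9KB of a boundary (presumably
 * sized around the largest jumbo frame) can possibly wrap.
 */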
3739 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3740 {
3741         u32 base = (u32) mapping & 0xffffffff;
3742
3743         return ((base > 0xffffdcc0) &&
3744                 (base + len + 8 < base));
3745 }
3746
3747 /* Test for DMA addresses > 40-bit */
3748 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3749                                           int len)
3750 {
3751 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3752         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3753                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3754         return 0;
3755 #else
3756         return 0;
3757 #endif
3758 }
3759
3760 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3761
3762 /* Workaround 4GB and 40-bit hardware DMA bugs. */
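/* Recovery strategy: linearize the offending skb with skb_copy(), remap it,
 * and re-check the 4GB boundary on the new single buffer (dropping the
 * packet if it still crosses one); the already-queued descriptors from
 * *start up to last_plus_one are then unmapped and their ring entries
 * cleaned up.
 */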
3763 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3764                                        u32 last_plus_one, u32 *start,
3765                                        u32 base_flags, u32 mss)
3766 {
3767         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3768         dma_addr_t new_addr = 0;
3769         u32 entry = *start;
3770         int i, ret = 0;
3771
3772         if (!new_skb) {
3773                 ret = -1;
3774         } else {
3775                 /* New SKB is guaranteed to be linear. */
3776                 entry = *start;
3777                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3778                                           PCI_DMA_TODEVICE);
3779                 /* Make sure new skb does not cross any 4G boundaries.
3780                  * Drop the packet if it does.
3781                  */
3782                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3783                         ret = -1;
3784                         dev_kfree_skb(new_skb);
3785                         new_skb = NULL;
3786                 } else {
3787                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3788                                     base_flags, 1 | (mss << 1));
3789                         *start = NEXT_TX(entry);
3790                 }
3791         }
3792
3793         /* Now clean up the sw ring entries. */
3794         i = 0;
3795         while (entry != last_plus_one) {
3796                 int len;
3797
3798                 if (i == 0)
3799                         len = skb_headlen(skb);
3800                 else
3801                         len = skb_shinfo(skb)->frags[i-1].size;
3802                 pci_unmap_single(tp->pdev,
3803                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3804                                  len, PCI_DMA_TODEVICE);
3805                 if (i == 0) {
3806                         tp->tx_buffers[entry].skb = new_skb;
3807                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3808                 } else {
3809                         tp->tx_buffers[entry].skb = NULL;
3810                 }
3811                 entry = NEXT_TX(entry);
3812                 i++;
3813         }
3814
3815         dev_kfree_skb(skb);
3816
3817         return ret;
3818 }
3819
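/* mss_and_is_end packs two values: bit 0 is the "last descriptor of this
 * packet" flag and bits 1 and up carry the MSS, which is why callers pass
 * "(i == last) | (mss << 1)".
 */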
3820 static void tg3_set_txd(struct tg3 *tp, int entry,
3821                         dma_addr_t mapping, int len, u32 flags,
3822                         u32 mss_and_is_end)
3823 {
3824         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3825         int is_end = (mss_and_is_end & 0x1);
3826         u32 mss = (mss_and_is_end >> 1);
3827         u32 vlan_tag = 0;
3828
3829         if (is_end)
3830                 flags |= TXD_FLAG_END;
3831         if (flags & TXD_FLAG_VLAN) {
3832                 vlan_tag = flags >> 16;
3833                 flags &= 0xffff;
3834         }
3835         vlan_tag |= (mss << TXD_MSS_SHIFT);
3836
3837         txd->addr_hi = ((u64) mapping >> 32);
3838         txd->addr_lo = ((u64) mapping & 0xffffffff);
3839         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3840         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3841 }
3842
3843 /* hard_start_xmit for devices that don't have any bugs and
3844  * support TG3_FLG2_HW_TSO_2 only.
3845  */
3846 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3847 {
3848         struct tg3 *tp = netdev_priv(dev);
3849         dma_addr_t mapping;
3850         u32 len, entry, base_flags, mss;
3851
3852         len = skb_headlen(skb);
3853
3854         /* We are running in a BH-disabled context with netif_tx_lock held,
3855          * and TX reclaim runs via tp->poll inside of a software
3856          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3857          * no IRQ context deadlocks to worry about either.  Rejoice!
3858          */
3859         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3860                 if (!netif_queue_stopped(dev)) {
3861                         netif_stop_queue(dev);
3862
3863                         /* This is a hard error, log it. */
3864                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3865                                "queue awake!\n", dev->name);
3866                 }
3867                 return NETDEV_TX_BUSY;
3868         }
3869
3870         entry = tp->tx_prod;
3871         base_flags = 0;
3872         mss = 0;
3873         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3874             (mss = skb_shinfo(skb)->gso_size) != 0) {
3875                 int tcp_opt_len, ip_tcp_len;
3876
3877                 if (skb_header_cloned(skb) &&
3878                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3879                         dev_kfree_skb(skb);
3880                         goto out_unlock;
3881                 }
3882
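                /* For TSO here, the IP + TCP header length is folded into
                 * the upper bits of the MSS value (shifted left by 9) before
                 * it is written into the descriptor; the low bits still
                 * carry the MSS itself.
                 */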
3883                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3884                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3885                 else {
3886                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3887                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3888                                      sizeof(struct tcphdr);
3889
3890                         skb->nh.iph->check = 0;
3891                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3892                                                      tcp_opt_len);
3893                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3894                 }
3895
3896                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3897                                TXD_FLAG_CPU_POST_DMA);
3898
3899                 skb->h.th->check = 0;
3900
3901         }
3902         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3903                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3904 #if TG3_VLAN_TAG_USED
3905         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3906                 base_flags |= (TXD_FLAG_VLAN |
3907                                (vlan_tx_tag_get(skb) << 16));
3908 #endif
3909
3910         /* Queue skb data, a.k.a. the main skb fragment. */
3911         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3912
3913         tp->tx_buffers[entry].skb = skb;
3914         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3915
3916         tg3_set_txd(tp, entry, mapping, len, base_flags,
3917                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3918
3919         entry = NEXT_TX(entry);
3920
3921         /* Now loop through additional data fragments, and queue them. */
3922         if (skb_shinfo(skb)->nr_frags > 0) {
3923                 unsigned int i, last;
3924
3925                 last = skb_shinfo(skb)->nr_frags - 1;
3926                 for (i = 0; i <= last; i++) {
3927                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3928
3929                         len = frag->size;
3930                         mapping = pci_map_page(tp->pdev,
3931                                                frag->page,
3932                                                frag->page_offset,
3933                                                len, PCI_DMA_TODEVICE);
3934
3935                         tp->tx_buffers[entry].skb = NULL;
3936                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3937
3938                         tg3_set_txd(tp, entry, mapping, len,
3939                                     base_flags, (i == last) | (mss << 1));
3940
3941                         entry = NEXT_TX(entry);
3942                 }
3943         }
3944
3945         /* Packets are ready, update Tx producer idx local and on card. */
3946         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3947
3948         tp->tx_prod = entry;
3949         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3950                 netif_stop_queue(dev);
3951                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
3952                         netif_wake_queue(tp->dev);
3953         }
3954
3955 out_unlock:
3956         mmiowb();
3957
3958         dev->trans_start = jiffies;
3959
3960         return NETDEV_TX_OK;
3961 }
3962
3963 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3964
3965 /* Use GSO to work around a rare TSO bug that may be triggered when the
3966  * TSO header is greater than 80 bytes.
3967  */
3968 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3969 {
3970         struct sk_buff *segs, *nskb;
3971
3972         /* Estimate the number of fragments in the worst case */
3973         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3974                 netif_stop_queue(tp->dev);
3975                 return NETDEV_TX_BUSY;
3976         }
3977
3978         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3979         if (unlikely(IS_ERR(segs)))
3980                 goto tg3_tso_bug_end;
3981
3982         do {
3983                 nskb = segs;
3984                 segs = segs->next;
3985                 nskb->next = NULL;
3986                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3987         } while (segs);
3988
3989 tg3_tso_bug_end:
3990         dev_kfree_skb(skb);
3991
3992         return NETDEV_TX_OK;
3993 }
3994
3995 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3996  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3997  */
3998 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3999 {
4000         struct tg3 *tp = netdev_priv(dev);
4001         dma_addr_t mapping;
4002         u32 len, entry, base_flags, mss;
4003         int would_hit_hwbug;
4004
4005         len = skb_headlen(skb);
4006
4007         /* We are running in a BH-disabled context with netif_tx_lock held,
4008          * and TX reclaim runs via tp->poll inside of a software
4009          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4010          * no IRQ context deadlocks to worry about either.  Rejoice!
4011          */
4012         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4013                 if (!netif_queue_stopped(dev)) {
4014                         netif_stop_queue(dev);
4015
4016                         /* This is a hard error, log it. */
4017                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4018                                "queue awake!\n", dev->name);
4019                 }
4020                 return NETDEV_TX_BUSY;
4021         }
4022
4023         entry = tp->tx_prod;
4024         base_flags = 0;
4025         if (skb->ip_summed == CHECKSUM_PARTIAL)
4026                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4027         mss = 0;
4028         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4029             (mss = skb_shinfo(skb)->gso_size) != 0) {
4030                 int tcp_opt_len, ip_tcp_len, hdr_len;
4031
4032                 if (skb_header_cloned(skb) &&
4033                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4034                         dev_kfree_skb(skb);
4035                         goto out_unlock;
4036                 }
4037
4038                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4039                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
4040
4041                 hdr_len = ip_tcp_len + tcp_opt_len;
4042                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4043                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
4044                         return (tg3_tso_bug(tp, skb));
4045
4046                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4047                                TXD_FLAG_CPU_POST_DMA);
4048
4049                 skb->nh.iph->check = 0;
4050                 skb->nh.iph->tot_len = htons(mss + hdr_len);
4051                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4052                         skb->h.th->check = 0;
4053                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4054                 }
4055                 else {
4056                         skb->h.th->check =
4057                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4058                                                    skb->nh.iph->daddr,
4059                                                    0, IPPROTO_TCP, 0);
4060                 }
4061
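                /* The combined IP-option and TCP-option length, in 32-bit
                 * words, is handed to the chip either in bits 11 and up of
                 * the MSS value (hardware TSO and the 5705) or in bits 12
                 * and up of base_flags (firmware TSO on other chips).
                 */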
4062                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4063                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4064                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4065                                 int tsflags;
4066
4067                                 tsflags = ((skb->nh.iph->ihl - 5) +
4068                                            (tcp_opt_len >> 2));
4069                                 mss |= (tsflags << 11);
4070                         }
4071                 } else {
4072                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
4073                                 int tsflags;
4074
4075                                 tsflags = ((skb->nh.iph->ihl - 5) +
4076                                            (tcp_opt_len >> 2));
4077                                 base_flags |= tsflags << 12;
4078                         }
4079                 }
4080         }
4081 #if TG3_VLAN_TAG_USED
4082         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4083                 base_flags |= (TXD_FLAG_VLAN |
4084                                (vlan_tx_tag_get(skb) << 16));
4085 #endif
4086
4087         /* Queue skb data, a.k.a. the main skb fragment. */
4088         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4089
4090         tp->tx_buffers[entry].skb = skb;
4091         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4092
4093         would_hit_hwbug = 0;
4094
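             /* Mappings that cross a 4GB boundary (or, for the fragments
              * below, exceed the chip's 40-bit DMA range) can trip hardware
              * DMA bugs; note any such mapping so the descriptors can be
              * fixed up below before the hardware is told about them.
              */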
4095         if (tg3_4g_overflow_test(mapping, len))
4096                 would_hit_hwbug = 1;
4097
4098         tg3_set_txd(tp, entry, mapping, len, base_flags,
4099                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4100
4101         entry = NEXT_TX(entry);
4102
4103         /* Now loop through additional data fragments, and queue them. */
4104         if (skb_shinfo(skb)->nr_frags > 0) {
4105                 unsigned int i, last;
4106
4107                 last = skb_shinfo(skb)->nr_frags - 1;
4108                 for (i = 0; i <= last; i++) {
4109                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4110
4111                         len = frag->size;
4112                         mapping = pci_map_page(tp->pdev,
4113                                                frag->page,
4114                                                frag->page_offset,
4115                                                len, PCI_DMA_TODEVICE);
4116
4117                         tp->tx_buffers[entry].skb = NULL;
4118                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4119
4120                         if (tg3_4g_overflow_test(mapping, len))
4121                                 would_hit_hwbug = 1;
4122
4123                         if (tg3_40bit_overflow_test(tp, mapping, len))
4124                                 would_hit_hwbug = 1;
4125
4126                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4127                                 tg3_set_txd(tp, entry, mapping, len,
4128                                             base_flags, (i == last)|(mss << 1));
4129                         else
4130                                 tg3_set_txd(tp, entry, mapping, len,
4131                                             base_flags, (i == last));
4132
4133                         entry = NEXT_TX(entry);
4134                 }
4135         }
4136
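             /* If any mapping above would trigger a DMA erratum, have
              * tigon3_dma_hwbug_workaround() redo the descriptors from
              * 'start' up to (but not including) 'last_plus_one'.
              */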
4137         if (would_hit_hwbug) {
4138                 u32 last_plus_one = entry;
4139                 u32 start;
4140
4141                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4142                 start &= (TG3_TX_RING_SIZE - 1);
4143
4144                 /* If the workaround fails due to memory/mapping
4145                  * failure, silently drop this packet.
4146                  */
4147                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4148                                                 &start, base_flags, mss))
4149                         goto out_unlock;
4150
4151                 entry = start;
4152         }
4153
4154         /* Packets are ready, update Tx producer idx local and on card. */
4155         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4156
4157         tp->tx_prod = entry;
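             /* Stop the queue once no more than MAX_SKB_FRAGS + 1 descriptors
              * remain, so a maximally fragmented packet can never overrun
              * the ring; wake it straight back up if the completion path
              * has already freed enough entries.
              */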
4158         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4159                 netif_stop_queue(dev);
4160                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4161                         netif_wake_queue(tp->dev);
4162         }
4163
4164 out_unlock:
4165         mmiowb();
4166
4167         dev->trans_start = jiffies;
4168
4169         return NETDEV_TX_OK;
4170 }
4171
4172 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4173                                int new_mtu)
4174 {
4175         dev->mtu = new_mtu;
4176
4177         if (new_mtu > ETH_DATA_LEN) {
4178                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4179                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4180                         ethtool_op_set_tso(dev, 0);
4181                 }
4182                 else
4183                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4184         } else {
4185                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4186                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4187                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4188         }
4189 }
4190
4191 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4192 {
4193         struct tg3 *tp = netdev_priv(dev);
4194         int err;
4195
4196         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4197                 return -EINVAL;
4198
4199         if (!netif_running(dev)) {
4200                 /* We'll just catch it later when the
4201                  * device is brought up.
4202                  */
4203                 tg3_set_mtu(dev, tp, new_mtu);
4204                 return 0;
4205         }
4206
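             /* The device is running: quiesce it, reset the chip and restart
              * so the RX side is rebuilt with buffers sized for the new MTU.
              */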
4207         tg3_netif_stop(tp);
4208
4209         tg3_full_lock(tp, 1);
4210
4211         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4212
4213         tg3_set_mtu(dev, tp, new_mtu);
4214
4215         err = tg3_restart_hw(tp, 0);
4216
4217         if (!err)
4218                 tg3_netif_start(tp);
4219
4220         tg3_full_unlock(tp);
4221
4222         return err;
4223 }
4224
4225 /* Free up pending packets in all rx/tx rings.
4226  *
4227  * The chip has been shut down and the driver detached from
4228  * the networking core, so no interrupts or new tx packets will
4229  * end up in the driver.  tp->{tx,}lock is not held and we are not
4230  * in an interrupt context and thus may sleep.
4231  */
4232 static void tg3_free_rings(struct tg3 *tp)
4233 {
4234         struct ring_info *rxp;
4235         int i;
4236
4237         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4238                 rxp = &tp->rx_std_buffers[i];
4239
4240                 if (rxp->skb == NULL)
4241                         continue;
4242                 pci_unmap_single(tp->pdev,
4243                                  pci_unmap_addr(rxp, mapping),
4244                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4245                                  PCI_DMA_FROMDEVICE);
4246                 dev_kfree_skb_any(rxp->skb);
4247                 rxp->skb = NULL;
4248         }
4249
4250         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4251                 rxp = &tp->rx_jumbo_buffers[i];
4252
4253                 if (rxp->skb == NULL)
4254                         continue;
4255                 pci_unmap_single(tp->pdev,
4256                                  pci_unmap_addr(rxp, mapping),
4257                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4258                                  PCI_DMA_FROMDEVICE);
4259                 dev_kfree_skb_any(rxp->skb);
4260                 rxp->skb = NULL;
4261         }
4262
4263         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4264                 struct tx_ring_info *txp;
4265                 struct sk_buff *skb;
4266                 int j;
4267
4268                 txp = &tp->tx_buffers[i];
4269                 skb = txp->skb;
4270
4271                 if (skb == NULL) {
4272                         i++;
4273                         continue;
4274                 }
4275
4276                 pci_unmap_single(tp->pdev,
4277                                  pci_unmap_addr(txp, mapping),
4278                                  skb_headlen(skb),
4279                                  PCI_DMA_TODEVICE);
4280                 txp->skb = NULL;
4281
4282                 i++;
4283
4284                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4285                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4286                         pci_unmap_page(tp->pdev,
4287                                        pci_unmap_addr(txp, mapping),
4288                                        skb_shinfo(skb)->frags[j].size,
4289                                        PCI_DMA_TODEVICE);
4290                         i++;
4291                 }
4292
4293                 dev_kfree_skb_any(skb);
4294         }
4295 }
4296
4297 /* Initialize tx/rx rings for packet processing.
4298  *
4299  * The chip has been shut down and the driver detached from
4300  * the networking core, so no interrupts or new tx packets will
4301  * end up in the driver.  tp->{tx,}lock are held and thus
4302  * we may not sleep.
4303  */
4304 static int tg3_init_rings(struct tg3 *tp)
4305 {
4306         u32 i;
4307
4308         /* Free up all the SKBs. */
4309         tg3_free_rings(tp);
4310
4311         /* Zero out all descriptors. */
4312         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4313         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4314         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4315         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4316
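             /* 5780-class chips have no separate jumbo ring; for jumbo MTUs
              * they use larger buffers on the standard ring instead.
              */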
4317         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4318         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4319             (tp->dev->mtu > ETH_DATA_LEN))
4320                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4321
4322         /* Initialize invariants of the rings; we only set this
4323          * stuff once.  This works because the card does not
4324          * write into the rx buffer posting rings.
4325          */
4326         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4327                 struct tg3_rx_buffer_desc *rxd;
4328
4329                 rxd = &tp->rx_std[i];
4330                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4331                         << RXD_LEN_SHIFT;
4332                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4333                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4334                                (i << RXD_OPAQUE_INDEX_SHIFT));
4335         }
4336
4337         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4338                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4339                         struct tg3_rx_buffer_desc *rxd;
4340
4341                         rxd = &tp->rx_jumbo[i];
4342                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4343                                 << RXD_LEN_SHIFT;
4344                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4345                                 RXD_FLAG_JUMBO;
4346                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4347                                (i << RXD_OPAQUE_INDEX_SHIFT));
4348                 }
4349         }
4350
4351         /* Now allocate fresh SKBs for each rx ring. */
4352         for (i = 0; i < tp->rx_pending; i++) {
4353                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4354                         printk(KERN_WARNING PFX
4355                                "%s: Using a smaller RX standard ring, "
4356                                "only %d out of %d buffers were allocated "
4357                                "successfully.\n",
4358                                tp->dev->name, i, tp->rx_pending);
4359                         if (i == 0)
4360                                 return -ENOMEM;
4361                         tp->rx_pending = i;
4362                         break;
4363                 }
4364         }
4365
4366         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4367                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4368                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4369                                              -1, i) < 0) {
4370                                 printk(KERN_WARNING PFX
4371                                        "%s: Using a smaller RX jumbo ring, "
4372                                        "only %d out of %d buffers were "
4373                                        "allocated successfully.\n",
4374                                        tp->dev->name, i, tp->rx_jumbo_pending);
4375                                 if (i == 0) {
4376                                         tg3_free_rings(tp);
4377                                         return -ENOMEM;
4378                                 }
4379                                 tp->rx_jumbo_pending = i;
4380                                 break;
4381                         }
4382                 }
4383         }
4384         return 0;
4385 }
4386
4387 /*
4388  * Must not be invoked with interrupt sources disabled and
4389  * the hardware shut down.
4390  */
4391 static void tg3_free_consistent(struct tg3 *tp)
4392 {
4393         kfree(tp->rx_std_buffers);
4394         tp->rx_std_buffers = NULL;
4395         if (tp->rx_std) {
4396                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4397                                     tp->rx_std, tp->rx_std_mapping);
4398                 tp->rx_std = NULL;
4399         }
4400         if (tp->rx_jumbo) {
4401                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4402                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4403                 tp->rx_jumbo = NULL;
4404         }
4405         if (tp->rx_rcb) {
4406                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4407                                     tp->rx_rcb, tp->rx_rcb_mapping);
4408                 tp->rx_rcb = NULL;
4409         }
4410         if (tp->tx_ring) {
4411                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4412                         tp->tx_ring, tp->tx_desc_mapping);
4413                 tp->tx_ring = NULL;
4414         }
4415         if (tp->hw_status) {
4416                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4417                                     tp->hw_status, tp->status_mapping);
4418                 tp->hw_status = NULL;
4419         }
4420         if (tp->hw_stats) {
4421                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4422                                     tp->hw_stats, tp->stats_mapping);
4423                 tp->hw_stats = NULL;
4424         }
4425 }
4426
4427 /*
4428  * Must not be invoked with interrupt sources disabled and
4429  * the hardware shut down.  Can sleep.
4430  */
4431 static int tg3_alloc_consistent(struct tg3 *tp)
4432 {
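             /* The std RX, jumbo RX and TX bookkeeping arrays are carved out
              * of a single kzalloc'd block; rx_jumbo_buffers and tx_buffers
              * are just offsets into it, which is why tg3_free_consistent()
              * only ever kfree()s rx_std_buffers.
              */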
4433         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4434                                       (TG3_RX_RING_SIZE +
4435                                        TG3_RX_JUMBO_RING_SIZE)) +
4436                                      (sizeof(struct tx_ring_info) *
4437                                       TG3_TX_RING_SIZE),
4438                                      GFP_KERNEL);
4439         if (!tp->rx_std_buffers)
4440                 return -ENOMEM;
4441
4442         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4443         tp->tx_buffers = (struct tx_ring_info *)
4444                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4445
4446         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4447                                           &tp->rx_std_mapping);
4448         if (!tp->rx_std)
4449                 goto err_out;
4450
4451         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4452                                             &tp->rx_jumbo_mapping);
4453
4454         if (!tp->rx_jumbo)
4455                 goto err_out;
4456
4457         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4458                                           &tp->rx_rcb_mapping);
4459         if (!tp->rx_rcb)
4460                 goto err_out;
4461
4462         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4463                                            &tp->tx_desc_mapping);
4464         if (!tp->tx_ring)
4465                 goto err_out;
4466
4467         tp->hw_status = pci_alloc_consistent(tp->pdev,
4468                                              TG3_HW_STATUS_SIZE,
4469                                              &tp->status_mapping);
4470         if (!tp->hw_status)
4471                 goto err_out;
4472
4473         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4474                                             sizeof(struct tg3_hw_stats),
4475                                             &tp->stats_mapping);
4476         if (!tp->hw_stats)
4477                 goto err_out;
4478
4479         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4480         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4481
4482         return 0;
4483
4484 err_out:
4485         tg3_free_consistent(tp);
4486         return -ENOMEM;
4487 }
4488
4489 #define MAX_WAIT_CNT 1000
4490
4491 /* To stop a block, clear the enable bit and poll till it
4492  * clears.  tp->lock is held.
4493  */
4494 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4495 {
4496         unsigned int i;
4497         u32 val;
4498
4499         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4500                 switch (ofs) {
4501                 case RCVLSC_MODE:
4502                 case DMAC_MODE:
4503                 case MBFREE_MODE:
4504                 case BUFMGR_MODE:
4505                 case MEMARB_MODE:
4506                         /* We can't enable/disable these bits of the
4507                          * 5705/5750, just say success.
4508                          */
4509                         return 0;
4510
4511                 default:
4512                         break;
4513                 }
4514         }
4515
4516         val = tr32(ofs);
4517         val &= ~enable_bit;
4518         tw32_f(ofs, val);
4519
4520         for (i = 0; i < MAX_WAIT_CNT; i++) {
4521                 udelay(100);
4522                 val = tr32(ofs);
4523                 if ((val & enable_bit) == 0)
4524                         break;
4525         }
4526
4527         if (i == MAX_WAIT_CNT && !silent) {
4528                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4529                        "ofs=%lx enable_bit=%x\n",
4530                        ofs, enable_bit);
4531                 return -ENODEV;
4532         }
4533
4534         return 0;
4535 }
4536
4537 /* tp->lock is held. */
4538 static int tg3_abort_hw(struct tg3 *tp, int silent)
4539 {
4540         int i, err;
4541
4542         tg3_disable_ints(tp);
4543
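             /* Quiesce the chip front to back: stop the MAC receiver first,
              * then shut down each RX- and TX-side block in turn, waiting
              * for its enable bit to clear before moving on.
              */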
4544         tp->rx_mode &= ~RX_MODE_ENABLE;
4545         tw32_f(MAC_RX_MODE, tp->rx_mode);
4546         udelay(10);
4547
4548         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4549         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4550         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4551         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4552         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4553         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4554
4555         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4556         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4557         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4558         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4559         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4560         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4561         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4562
4563         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4564         tw32_f(MAC_MODE, tp->mac_mode);
4565         udelay(40);
4566
4567         tp->tx_mode &= ~TX_MODE_ENABLE;
4568         tw32_f(MAC_TX_MODE, tp->tx_mode);
4569
4570         for (i = 0; i < MAX_WAIT_CNT; i++) {
4571                 udelay(100);
4572                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4573                         break;
4574         }
4575         if (i >= MAX_WAIT_CNT) {
4576                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4577                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4578                        tp->dev->name, tr32(MAC_TX_MODE));
4579                 err |= -ENODEV;
4580         }
4581
4582         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4583         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4584         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4585
4586         tw32(FTQ_RESET, 0xffffffff);
4587         tw32(FTQ_RESET, 0x00000000);
4588
4589         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4590         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4591
4592         if (tp->hw_status)
4593                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4594         if (tp->hw_stats)
4595                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4596
4597         return err;
4598 }
4599
4600 /* tp->lock is held. */
4601 static int tg3_nvram_lock(struct tg3 *tp)
4602 {
4603         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4604                 int i;
4605
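                     /* Grab the hardware arbitration semaphore for NVRAM
                      * access.  Only the first caller touches the hardware;
                      * nested callers just bump nvram_lock_cnt.  Give the
                      * grant up to 8000 * 20us (~160ms) to appear.
                      */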
4606                 if (tp->nvram_lock_cnt == 0) {
4607                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4608                         for (i = 0; i < 8000; i++) {
4609                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4610                                         break;
4611                                 udelay(20);
4612                         }
4613                         if (i == 8000) {
4614                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4615                                 return -ENODEV;
4616                         }
4617                 }
4618                 tp->nvram_lock_cnt++;
4619         }
4620         return 0;
4621 }
4622
4623 /* tp->lock is held. */
4624 static void tg3_nvram_unlock(struct tg3 *tp)
4625 {
4626         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4627                 if (tp->nvram_lock_cnt > 0)
4628                         tp->nvram_lock_cnt--;
4629                 if (tp->nvram_lock_cnt == 0)
4630                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4631         }
4632 }
4633
4634 /* tp->lock is held. */
4635 static void tg3_enable_nvram_access(struct tg3 *tp)
4636 {
4637         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4638             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4639                 u32 nvaccess = tr32(NVRAM_ACCESS);
4640
4641                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4642         }
4643 }
4644
4645 /* tp->lock is held. */
4646 static void tg3_disable_nvram_access(struct tg3 *tp)
4647 {
4648         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4649             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4650                 u32 nvaccess = tr32(NVRAM_ACCESS);
4651
4652                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4653         }
4654 }
4655
4656 /* tp->lock is held. */
4657 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4658 {
4659         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4660                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4661
4662         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4663                 switch (kind) {
4664                 case RESET_KIND_INIT:
4665                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4666                                       DRV_STATE_START);
4667                         break;
4668
4669                 case RESET_KIND_SHUTDOWN:
4670                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4671                                       DRV_STATE_UNLOAD);
4672                         break;
4673
4674                 case RESET_KIND_SUSPEND:
4675                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4676                                       DRV_STATE_SUSPEND);
4677                         break;
4678
4679                 default:
4680                         break;
4681                 }
4682         }
4683 }
4684
4685 /* tp->lock is held. */
4686 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4687 {
4688         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4689                 switch (kind) {
4690                 case RESET_KIND_INIT:
4691                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4692                                       DRV_STATE_START_DONE);
4693                         break;
4694
4695                 case RESET_KIND_SHUTDOWN:
4696                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4697                                       DRV_STATE_UNLOAD_DONE);
4698                         break;
4699
4700                 default:
4701                         break;
4702                 }
4703         }
4704 }
4705
4706 /* tp->lock is held. */
4707 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4708 {
4709         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4710                 switch (kind) {
4711                 case RESET_KIND_INIT:
4712                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4713                                       DRV_STATE_START);
4714                         break;
4715
4716                 case RESET_KIND_SHUTDOWN:
4717                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4718                                       DRV_STATE_UNLOAD);
4719                         break;
4720
4721                 case RESET_KIND_SUSPEND:
4722                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4723                                       DRV_STATE_SUSPEND);
4724                         break;
4725
4726                 default:
4727                         break;
4728                 }
4729         }
4730 }
4731
4732 static int tg3_poll_fw(struct tg3 *tp)
4733 {
4734         int i;
4735         u32 val;
4736
4737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4738                 /* Wait up to 20ms for init done. */
4739                 for (i = 0; i < 200; i++) {
4740                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4741                                 return 0;
4742                         udelay(100);
4743                 }
4744                 return -ENODEV;
4745         }
4746
4747         /* Wait for firmware initialization to complete. */
4748         for (i = 0; i < 100000; i++) {
4749                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4750                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4751                         break;
4752                 udelay(10);
4753         }
4754
4755         /* Chip might not be fitted with firmware.  Some Sun onboard
4756          * parts are configured like that.  So don't signal the timeout
4757          * of the above loop as an error, but do report the lack of
4758          * running firmware once.
4759          */
4760         if (i >= 100000 &&
4761             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4762                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4763
4764                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4765                        tp->dev->name);
4766         }
4767
4768         return 0;
4769 }
4770
4771 static void tg3_stop_fw(struct tg3 *);
4772
4773 /* tp->lock is held. */
4774 static int tg3_chip_reset(struct tg3 *tp)
4775 {
4776         u32 val;
4777         void (*write_op)(struct tg3 *, u32, u32);
4778         int err;
4779
4780         tg3_nvram_lock(tp);
4781
4782         /* No matching tg3_nvram_unlock() after this because
4783          * chip reset below will undo the nvram lock.
4784          */
4785         tp->nvram_lock_cnt = 0;
4786
4787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4788             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4789             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4790                 tw32(GRC_FASTBOOT_PC, 0);
4791
4792         /*
4793          * We must avoid the readl() that normally takes place.
4794          * It locks machines, causes machine checks, and other
4795          * fun things.  So, temporarily disable the 5701
4796          * hardware workaround, while we do the reset.
4797          */
4798         write_op = tp->write32;
4799         if (write_op == tg3_write_flush_reg32)
4800                 tp->write32 = tg3_write32;
4801
4802         /* do the reset */
4803         val = GRC_MISC_CFG_CORECLK_RESET;
4804
4805         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4806                 if (tr32(0x7e2c) == 0x60) {
4807                         tw32(0x7e2c, 0x20);
4808                 }
4809                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4810                         tw32(GRC_MISC_CFG, (1 << 29));
4811                         val |= (1 << 29);
4812                 }
4813         }
4814
4815         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4816                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
4817                 tw32(GRC_VCPU_EXT_CTRL,
4818                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
4819         }
4820
4821         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4822                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4823         tw32(GRC_MISC_CFG, val);
4824
4825         /* restore 5701 hardware bug workaround write method */
4826         tp->write32 = write_op;
4827
4828         /* Unfortunately, we have to delay before the PCI read back.
4829          * Some 575X chips even will not respond to a PCI cfg access
4830          * Some 575X chips will not even respond to a PCI cfg access
4831          *
4832          * How do these hardware designers expect things to work
4833          * properly if the PCI write is posted for a long period
4834          * of time?  It is always necessary to have some method by
4835          * which a register read back can occur to flush the posted
4836          * write that performs the reset.
4837          *
4838          * For most tg3 variants the trick below was working.
4839          * Ho hum...
4840          */
4841         udelay(120);
4842
4843         /* Flush PCI posted writes.  The normal MMIO registers
4844          * are inaccessible at this time so this is the only
4845          * way to do this reliably (actually, this is no longer
4846          * the case, see above).  I tried to use indirect
4847          * register read/write but this upset some 5701 variants.
4848          */
4849         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4850
4851         udelay(120);
4852
4853         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4854                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4855                         int i;
4856                         u32 cfg_val;
4857
4858                         /* Wait for link training to complete.  */
4859                         for (i = 0; i < 5000; i++)
4860                                 udelay(100);
4861
4862                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4863                         pci_write_config_dword(tp->pdev, 0xc4,
4864                                                cfg_val | (1 << 15));
4865                 }
4866                 /* Set PCIE max payload size and clear error status.  */
4867                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4868         }
4869
4870         /* Re-enable indirect register accesses. */
4871         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4872                                tp->misc_host_ctrl);
4873
4874         /* Set MAX PCI retry to zero. */
4875         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4876         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4877             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4878                 val |= PCISTATE_RETRY_SAME_DMA;
4879         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4880
4881         pci_restore_state(tp->pdev);
4882
4883         /* Make sure PCI-X relaxed ordering bit is clear. */
4884         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4885         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4886         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4887
4888         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4889                 u32 val;
4890
4891                 /* Chip reset on 5780 will reset MSI enable bit,
4892                  * so need to restore it.
4893                  */
4894                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4895                         u16 ctrl;
4896
4897                         pci_read_config_word(tp->pdev,
4898                                              tp->msi_cap + PCI_MSI_FLAGS,
4899                                              &ctrl);
4900                         pci_write_config_word(tp->pdev,
4901                                               tp->msi_cap + PCI_MSI_FLAGS,
4902                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4903                         val = tr32(MSGINT_MODE);
4904                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4905                 }
4906
4907                 val = tr32(MEMARB_MODE);
4908                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4909
4910         } else
4911                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4912
4913         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4914                 tg3_stop_fw(tp);
4915                 tw32(0x5000, 0x400);
4916         }
4917
4918         tw32(GRC_MODE, tp->grc_mode);
4919
4920         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4921                 u32 val = tr32(0xc4);
4922
4923                 tw32(0xc4, val | (1 << 15));
4924         }
4925
4926         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4927             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4928                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4929                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4930                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4931                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4932         }
4933
4934         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4935                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4936                 tw32_f(MAC_MODE, tp->mac_mode);
4937         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4938                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4939                 tw32_f(MAC_MODE, tp->mac_mode);
4940         } else
4941                 tw32_f(MAC_MODE, 0);
4942         udelay(40);
4943
4944         err = tg3_poll_fw(tp);
4945         if (err)
4946                 return err;
4947
4948         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4949             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4950                 u32 val = tr32(0x7c00);
4951
4952                 tw32(0x7c00, val | (1 << 25));
4953         }
4954
4955         /* Reprobe ASF enable state.  */
4956         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4957         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4958         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4959         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4960                 u32 nic_cfg;
4961
4962                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4963                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4964                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4965                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4966                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4967                 }
4968         }
4969
4970         return 0;
4971 }
4972
4973 /* tp->lock is held. */
4974 static void tg3_stop_fw(struct tg3 *tp)
4975 {
4976         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4977                 u32 val;
4978                 int i;
4979
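                     /* Ask the ASF firmware to pause: post the command in
                      * the firmware mailbox, ring the RX CPU event bit, and
                      * give the firmware up to 100us to acknowledge by
                      * clearing that bit.
                      */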
4980                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4981                 val = tr32(GRC_RX_CPU_EVENT);
4982                 val |= (1 << 14);
4983                 tw32(GRC_RX_CPU_EVENT, val);
4984
4985                 /* Wait for RX cpu to ACK the event.  */
4986                 for (i = 0; i < 100; i++) {
4987                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4988                                 break;
4989                         udelay(1);
4990                 }
4991         }
4992 }
4993
4994 /* tp->lock is held. */
4995 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4996 {
4997         int err;
4998
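             /* Ordering matters: pause the management firmware, tell it a
              * reset is coming, quiesce the DMA engines, reset the chip,
              * and finally report the reset as complete.
              */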
4999         tg3_stop_fw(tp);
5000
5001         tg3_write_sig_pre_reset(tp, kind);
5002
5003         tg3_abort_hw(tp, silent);
5004         err = tg3_chip_reset(tp);
5005
5006         tg3_write_sig_legacy(tp, kind);
5007         tg3_write_sig_post_reset(tp, kind);
5008
5009         if (err)
5010                 return err;
5011
5012         return 0;
5013 }
5014
5015 #define TG3_FW_RELEASE_MAJOR    0x0
5016 #define TG3_FW_RELEASE_MINOR    0x0
5017 #define TG3_FW_RELEASE_FIX      0x0
5018 #define TG3_FW_START_ADDR       0x08000000
5019 #define TG3_FW_TEXT_ADDR        0x08000000
5020 #define TG3_FW_TEXT_LEN         0x9c0
5021 #define TG3_FW_RODATA_ADDR      0x080009c0
5022 #define TG3_FW_RODATA_LEN       0x60
5023 #define TG3_FW_DATA_ADDR        0x08000a40
5024 #define TG3_FW_DATA_LEN         0x20
5025 #define TG3_FW_SBSS_ADDR        0x08000a60
5026 #define TG3_FW_SBSS_LEN         0xc
5027 #define TG3_FW_BSS_ADDR         0x08000a70
5028 #define TG3_FW_BSS_LEN          0x10
5029
5030 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5031         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5032         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5033         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5034         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5035         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5036         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5037         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5038         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5039         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5040         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5041         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5042         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5043         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5044         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5045         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5046         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5047         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5048         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5049         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5050         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5051         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5052         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5053         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5054         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5055         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5056         0, 0, 0, 0, 0, 0,
5057         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5058         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5059         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5060         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5061         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5062         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5063         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5064         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5065         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5066         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5067         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5068         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5069         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5070         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5071         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5072         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5073         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5074         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5075         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5076         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5077         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5078         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5079         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5080         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5081         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5082         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5083         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5084         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5085         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5086         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5087         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5088         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5089         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5090         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5091         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5092         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5093         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5094         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5095         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5096         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5097         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5098         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5099         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5100         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5101         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5102         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5103         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5104         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5105         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5106         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5107         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5108         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5109         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5110         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5111         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5112         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5113         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5114         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5115         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5116         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5117         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5118         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5119         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5120         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5121         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5122 };
5123
5124 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5125         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5126         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5127         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5128         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5129         0x00000000
5130 };
5131
5132 #if 0 /* All zeros, don't eat up space with it. */
5133 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5134         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5135         0x00000000, 0x00000000, 0x00000000, 0x00000000
5136 };
5137 #endif
5138
5139 #define RX_CPU_SCRATCH_BASE     0x30000
5140 #define RX_CPU_SCRATCH_SIZE     0x04000
5141 #define TX_CPU_SCRATCH_BASE     0x34000
5142 #define TX_CPU_SCRATCH_SIZE     0x04000
5143
5144 /* tp->lock is held. */
5145 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5146 {
5147         int i;
5148
5149         BUG_ON(offset == TX_CPU_BASE &&
5150             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5151
5152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5153                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5154
5155                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5156                 return 0;
5157         }
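             /* The halt request apparently does not always stick on the
              * first write, so keep re-issuing CPU_MODE_HALT (up to 10000
              * times) until the halt bit reads back set.  The RX CPU gets
              * one final forced halt with a read-back flush for good measure.
              */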
5158         if (offset == RX_CPU_BASE) {
5159                 for (i = 0; i < 10000; i++) {
5160                         tw32(offset + CPU_STATE, 0xffffffff);
5161                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5162                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5163                                 break;
5164                 }
5165
5166                 tw32(offset + CPU_STATE, 0xffffffff);
5167                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5168                 udelay(10);
5169         } else {
5170                 for (i = 0; i < 10000; i++) {
5171                         tw32(offset + CPU_STATE, 0xffffffff);
5172                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5173                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5174                                 break;
5175                 }
5176         }
5177
5178         if (i >= 10000) {
5179                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5180                        "%s CPU\n",
5181                        tp->dev->name,
5182                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5183                 return -ENODEV;
5184         }
5185
5186         /* Clear firmware's nvram arbitration. */
5187         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5188                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5189         return 0;
5190 }
5191
5192 struct fw_info {
5193         unsigned int text_base;
5194         unsigned int text_len;
5195         const u32 *text_data;
5196         unsigned int rodata_base;
5197         unsigned int rodata_len;
5198         const u32 *rodata_data;
5199         unsigned int data_base;
5200         unsigned int data_len;
5201         const u32 *data_data;
5202 };
5203
5204 /* tp->lock is held. */
5205 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5206                                  int cpu_scratch_size, struct fw_info *info)
5207 {
5208         int err, lock_err, i;
5209         void (*write_op)(struct tg3 *, u32, u32);
5210
5211         if (cpu_base == TX_CPU_BASE &&
5212             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5213                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5214                        "TX cpu firmware on %s which is 5705.\n",
5215                        tp->dev->name);
5216                 return -EINVAL;
5217         }
5218
5219         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5220                 write_op = tg3_write_mem;
5221         else
5222                 write_op = tg3_write_indirect_reg32;
5223
5224         /* It is possible that bootcode is still loading at this point.
5225          * Get the nvram lock first before halting the cpu.
5226          */
5227         lock_err = tg3_nvram_lock(tp);
5228         err = tg3_halt_cpu(tp, cpu_base);
5229         if (!lock_err)
5230                 tg3_nvram_unlock(tp);
5231         if (err)
5232                 goto out;
5233
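             /* With the CPU halted, clear its scratch memory, then copy the
              * text, rodata and data sections into place one word at a
              * time.  A NULL section pointer means "fill with zeros".
              */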
5234         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5235                 write_op(tp, cpu_scratch_base + i, 0);
5236         tw32(cpu_base + CPU_STATE, 0xffffffff);
5237         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5238         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5239                 write_op(tp, (cpu_scratch_base +
5240                               (info->text_base & 0xffff) +
5241                               (i * sizeof(u32))),
5242                          (info->text_data ?
5243                           info->text_data[i] : 0));
5244         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5245                 write_op(tp, (cpu_scratch_base +
5246                               (info->rodata_base & 0xffff) +
5247                               (i * sizeof(u32))),
5248                          (info->rodata_data ?
5249                           info->rodata_data[i] : 0));
5250         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5251                 write_op(tp, (cpu_scratch_base +
5252                               (info->data_base & 0xffff) +
5253                               (i * sizeof(u32))),
5254                          (info->data_data ?
5255                           info->data_data[i] : 0));
5256
5257         err = 0;
5258
5259 out:
5260         return err;
5261 }
5262
5263 /* tp->lock is held. */
5264 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5265 {
5266         struct fw_info info;
5267         int err, i;
5268
5269         info.text_base = TG3_FW_TEXT_ADDR;
5270         info.text_len = TG3_FW_TEXT_LEN;
5271         info.text_data = &tg3FwText[0];
5272         info.rodata_base = TG3_FW_RODATA_ADDR;
5273         info.rodata_len = TG3_FW_RODATA_LEN;
5274         info.rodata_data = &tg3FwRodata[0];
5275         info.data_base = TG3_FW_DATA_ADDR;
5276         info.data_len = TG3_FW_DATA_LEN;
5277         info.data_data = NULL;
5278
5279         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5280                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5281                                     &info);
5282         if (err)
5283                 return err;
5284
5285         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5286                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5287                                     &info);
5288         if (err)
5289                 return err;
5290
5291         /* Now startup only the RX cpu. */
5292         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5293         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5294
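             /* Verify the program counter actually latched at the firmware
              * entry point; the write apparently does not always take on
              * the first attempt, so retry a few times before giving up.
              */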
5295         for (i = 0; i < 5; i++) {
5296                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5297                         break;
5298                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5299                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5300                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5301                 udelay(1000);
5302         }
5303         if (i >= 5) {
5304                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
5305                        "to set RX CPU PC, is %08x, should be %08x\n",
5306                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5307                        TG3_FW_TEXT_ADDR);
5308                 return -ENODEV;
5309         }
5310         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5311         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5312
5313         return 0;
5314 }
5315
5316
5317 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5318 #define TG3_TSO_FW_RELEASE_MINOR        0x6
5319 #define TG3_TSO_FW_RELEASE_FIX          0x0
5320 #define TG3_TSO_FW_START_ADDR           0x08000000
5321 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5322 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5323 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5324 #define TG3_TSO_FW_RODATA_LEN           0x60
5325 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5326 #define TG3_TSO_FW_DATA_LEN             0x30
5327 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5328 #define TG3_TSO_FW_SBSS_LEN             0x2c
5329 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5330 #define TG3_TSO_FW_BSS_LEN              0x894
5331
5332 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5333         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5334         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5335         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5336         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5337         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5338         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5339         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5340         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5341         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5342         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5343         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5344         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5345         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5346         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5347         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5348         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5349         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5350         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5351         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5352         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5353         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5354         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5355         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5356         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5357         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5358         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5359         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5360         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5361         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5362         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5363         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5364         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5365         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5366         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5367         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5368         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5369         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5370         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5371         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5372         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5373         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5374         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5375         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5376         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5377         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5378         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5379         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5380         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5381         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5382         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5383         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5384         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5385         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5386         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5387         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5388         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5389         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5390         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5391         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5392         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5393         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5394         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5395         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5396         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5397         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5398         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5399         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5400         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5401         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5402         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5403         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5404         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5405         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5406         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5407         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5408         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5409         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5410         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5411         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5412         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5413         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5414         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5415         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5416         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5417         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5418         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5419         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5420         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5421         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5422         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5423         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5424         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5425         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5426         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5427         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5428         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5429         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5430         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5431         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5432         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5433         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5434         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5435         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5436         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5437         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5438         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5439         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5440         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5441         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5442         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5443         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5444         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5445         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5446         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5447         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5448         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5449         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5450         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5451         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5452         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5453         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5454         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5455         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5456         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5457         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5458         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5459         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5460         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5461         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5462         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5463         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5464         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5465         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5466         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5467         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5468         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5469         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5470         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5471         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5472         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5473         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5474         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5475         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5476         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5477         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5478         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5479         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5480         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5481         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5482         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5483         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5484         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5485         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5486         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5487         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5488         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5489         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5490         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5491         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5492         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5493         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5494         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5495         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5496         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5497         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5498         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5499         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5500         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5501         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5502         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5503         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5504         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5505         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5506         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5507         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5508         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5509         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5510         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5511         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5512         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5513         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5514         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5515         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5516         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5517         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5518         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5519         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5520         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5521         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5522         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5523         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5524         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5525         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5526         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5527         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5528         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5529         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5530         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5531         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5532         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5533         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5534         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5535         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5536         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5537         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5538         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5539         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5540         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5541         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5542         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5543         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5544         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5545         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5546         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5547         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5548         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5549         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5550         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5551         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5552         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5553         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5554         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5555         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5556         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5557         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5558         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5559         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5560         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5561         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5562         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5563         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5564         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5565         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5566         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5567         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5568         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5569         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5570         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5571         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5572         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5573         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5574         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5575         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5576         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5577         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5578         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5579         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5580         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5581         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5582         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5583         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5584         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5585         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5586         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5587         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5588         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5589         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5590         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5591         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5592         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5593         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5594         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5595         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5596         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5597         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5598         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5599         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5600         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5601         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5602         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5603         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5604         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5605         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5606         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5607         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5608         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5609         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5610         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5611         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5612         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5613         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5614         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5615         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5616         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5617 };
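
/* Hypothetical sketch, not part of the original driver: copy the .text
 * image above to its nominal load address one 32-bit word at a time,
 * assuming a tg3_write_mem()-style indirect memory-write helper and the
 * struct tg3 context from tg3.h.  The driver's real loader additionally
 * places the image inside the internal CPU's scratch-memory window; that
 * mapping is ignored here for brevity.
 */
static void tg3_tso_fw_copy_text_sketch(struct tg3 *tp)
{
	u32 off;

	for (off = 0; off < TG3_TSO_FW_TEXT_LEN; off += sizeof(u32))
		tg3_write_mem(tp, TG3_TSO_FW_TEXT_ADDR + off,
			      tg3TsoFwText[off / sizeof(u32)]);
}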
5618
5619 static const u32 tg3TsoFwRodata[] = {
5620         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5621         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5622         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5623         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5624         0x00000000,
5625 };
5626
5627 static const u32 tg3TsoFwData[] = {
5628         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5629         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5630         0x00000000,
5631 };
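
/* Both tg3TsoFwRodata and tg3TsoFwData above are largely ASCII text packed
 * big-endian into 32-bit words (for example the "stkoffld_v1.6.0" version
 * tag at the start of the .data image).  The helper below is a hypothetical,
 * stand-alone sketch for dumping such a word array as characters, purely for
 * inspection; it is not part of the driver.  Calling it as
 * tg3_tso_fw_print_ascii(tg3TsoFwData, ARRAY_SIZE(tg3TsoFwData)) would show
 * the version tag four characters per line.
 */
static void tg3_tso_fw_print_ascii(const u32 *words, int nwords)
{
	char buf[5];
	int i, j;

	buf[4] = '\0';
	for (i = 0; i < nwords; i++) {
		for (j = 0; j < 4; j++) {
			char c = (words[i] >> (24 - 8 * j)) & 0xff;

			/* Replace non-printable bytes with '.' */
			buf[j] = (c >= 0x20 && c < 0x7f) ? c : '.';
		}
		printk(KERN_DEBUG PFX "fw str[%d]: %s\n", i, buf);
	}
}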
5632
5633 /* 5705 needs a special version of the TSO firmware.  */
5634 #define TG3_TSO5_FW_RELEASE_MAJOR