/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.92"
#define DRV_MODULE_RELDATE      "May 2, 2008"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
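/* NEXT_TX() depends on TG3_TX_RING_SIZE being a power of two: for example,
 * NEXT_TX(511) == ((511 + 1) & 511) == 0, so the AND mask wraps the ring
 * index without a modulo instruction, exactly as the comment above suggests.
 */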

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
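/* For example, with the default of TG3_DEF_TX_RING_PENDING (511) pending
 * descriptors, the queue is woken once 511 / 4 = 127 descriptors are free.
 */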

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

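/* Indirect register access: rather than a memory-mapped write, the target
 * offset goes into the TG3PCI_REG_BASE_ADDR config-space window and the
 * value into TG3PCI_REG_DATA.  indirect_lock serializes the two config
 * cycles so concurrent callers cannot interleave address and data.
 */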
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;

        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg, val)          tp->write32(tp, reg, val)
#define tw32_f(reg, val)        _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)               tp->read32(tp, reg)
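/* For example, tg3_switch_clocks() below uses
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) so that every clock
 * transition is followed by a 40 usec settling delay.
 */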

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

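/* Boards with TG3_FLG3_ENABLE_APE carry on-board management firmware (the
 * APE) that shares the hardware with the host driver; the lock/grant
 * registers below arbitrate access to shared resources such as
 * TG3_APE_LOCK_MEM.
 */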
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
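/* tg3_readphy()/tg3_writephy() below build MII management frames by hand:
 * the PHY and register addresses, the command bits and (for writes) the
 * data are packed into MAC_MI_COM, and MI_COM_BUSY is then polled until
 * the transaction completes.  At PHY_BUSY_LOOPS polls of 10 usec each,
 * the worst-case wait is about 50 msec.
 */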

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;

        /* Wait for up to 2.5 seconds (250000 polls of 10 usec each). */
        for (i = 0; i < 250000; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(10);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}

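/* Map the driver's TX/RX pause settings onto the MII advertisement bits
 * (802.3 pause resolution): symmetric pause advertises PAUSE_CAP alone,
 * TX-only advertises PAUSE_ASYM alone, and RX-only advertises both bits.
 */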
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_PAUSE_CAP) {
                if (lcladv & ADVERTISE_PAUSE_ASYM) {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_PAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
                if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & TG3_FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & TG3_FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

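/* PHY DSP registers are reached indirectly: write the register number to
 * MII_TG3_DSP_ADDRESS, then the value to MII_TG3_DSP_RW_PORT.
 */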
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
                        tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                     ephy | MII_TG3_EPHY_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
                                tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
                        }
                        tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

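/* Apply per-board PHY tuning values from one-time-programmable (OTP)
 * memory, unpacking each field of tp->phy_otp into its DSP register.
 */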
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

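/* Write a known test pattern to each of the four DSP channels, read it
 * back, and compare.  On any mismatch or macro timeout, tell the caller
 * (via *resetp) to reset the PHY and try again.
 */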
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        } else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

/* This will reset the tigon3 PHY if there is no valid link. */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

        tg3_phy_apply_otp(tp);

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that
         * support jumbo frames.
         */
1432         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1433                 /* Cannot do read-modify-write on 5401 */
1434                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1435         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1436                 u32 phy_reg;
1437
1438                 /* Set bit 14 with read-modify-write to preserve other bits */
1439                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1440                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1441                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1442         }
1443
1444         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
1445          * support jumbo frame transmission.
1446          */
1447         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1448                 u32 phy_reg;
1449
1450                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1451                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1452                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1453         }
1454
1455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1456                 /* adjust output voltage */
1457                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1458         }
1459
1460         tg3_phy_toggle_automdix(tp, 1);
1461         tg3_phy_set_wirespeed(tp);
1462         return 0;
1463 }
1464
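     /* Configure the GPIOs that switch auxiliary (Vaux) power.  If this
      * port or its dual-port peer needs WOL or ASF, the GPIOs are
      * sequenced so that standby power stays available; otherwise they
      * are cycled to release it.
      */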
1465 static void tg3_frob_aux_power(struct tg3 *tp)
1466 {
1467         struct tg3 *tp_peer = tp;
1468
1469         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1470                 return;
1471
1472         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1473             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1474                 struct net_device *dev_peer;
1475
1476                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1477                 /* remove_one() may have been run on the peer. */
1478                 if (!dev_peer)
1479                         tp_peer = tp;
1480                 else
1481                         tp_peer = netdev_priv(dev_peer);
1482         }
1483
1484         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1485             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1486             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1487             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1489                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1490                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1491                                     (GRC_LCLCTRL_GPIO_OE0 |
1492                                      GRC_LCLCTRL_GPIO_OE1 |
1493                                      GRC_LCLCTRL_GPIO_OE2 |
1494                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1495                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1496                                     100);
1497                 } else {
1498                         u32 no_gpio2;
1499                         u32 grc_local_ctrl = 0;
1500
1501                         if (tp_peer != tp &&
1502                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1503                                 return;
1504
1505                         /* Workaround to prevent drawing too much current. */
1506                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1507                             ASIC_REV_5714) {
1508                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1509                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1510                                             grc_local_ctrl, 100);
1511                         }
1512
1513                         /* On 5753 and variants, GPIO2 cannot be used. */
1514                         no_gpio2 = tp->nic_sram_data_cfg &
1515                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1516
1517                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1518                                          GRC_LCLCTRL_GPIO_OE1 |
1519                                          GRC_LCLCTRL_GPIO_OE2 |
1520                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1521                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1522                         if (no_gpio2) {
1523                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1524                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1525                         }
1526                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1527                                                     grc_local_ctrl, 100);
1528
1529                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1530
1531                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1532                                                     grc_local_ctrl, 100);
1533
1534                         if (!no_gpio2) {
1535                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1536                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1537                                             grc_local_ctrl, 100);
1538                         }
1539                 }
1540         } else {
1541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1542                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1543                         if (tp_peer != tp &&
1544                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1545                                 return;
1546
1547                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1548                                     (GRC_LCLCTRL_GPIO_OE1 |
1549                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1550
1551                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1552                                     GRC_LCLCTRL_GPIO_OE1, 100);
1553
1554                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1555                                     (GRC_LCLCTRL_GPIO_OE1 |
1556                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1557                 }
1558         }
1559 }
1560
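     /* Decide whether MAC_MODE_LINK_POLARITY should be set for the given
      * link speed on 5700-class chips; the answer depends on the LED mode
      * and on whether a BCM5411 PHY is installed.
      */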
1561 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1562 {
1563         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1564                 return 1;
1565         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1566                 if (speed != SPEED_10)
1567                         return 1;
1568         } else if (speed == SPEED_10)
1569                 return 1;
1570
1571         return 0;
1572 }
1573
1574 static int tg3_setup_phy(struct tg3 *, int);
1575
1576 #define RESET_KIND_SHUTDOWN     0
1577 #define RESET_KIND_INIT         1
1578 #define RESET_KIND_SUSPEND      2
1579
1580 static void tg3_write_sig_post_reset(struct tg3 *, int);
1581 static int tg3_halt_cpu(struct tg3 *, u32);
1582 static int tg3_nvram_lock(struct tg3 *);
1583 static void tg3_nvram_unlock(struct tg3 *);
1584
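     /* Power down the PHY ahead of a low-power transition.  SERDES and
      * 5906 devices use dedicated controls; everything else gets
      * BMCR_PDOWN, except for chips where powering the PHY down is
      * unsafe due to hardware bugs.
      */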
1585 static void tg3_power_down_phy(struct tg3 *tp)
1586 {
1587         u32 val;
1588
1589         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1590                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1591                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1592                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1593
1594                         sg_dig_ctrl |=
1595                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1596                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1597                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1598                 }
1599                 return;
1600         }
1601
1602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1603                 tg3_bmcr_reset(tp);
1604                 val = tr32(GRC_MISC_CFG);
1605                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1606                 udelay(40);
1607                 return;
1608         } else {
1609                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1610                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1611                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1612         }
1613
1614         /* The PHY should not be powered down on some chips because
1615          * of bugs.
1616          */
1617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1619             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1620              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1621                 return;
1622
1623         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1624                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1625                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1626                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1627                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1628         }
1629
1630         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1631 }
1632
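     /* Move the device into the requested PCI power state.  D0 simply
      * restores full power; the low-power states set up wake-on-LAN,
      * reprogram the clocks, optionally power down the PHY and finally
      * write the new state to PCI_PM_CTRL.
      */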
1633 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1634 {
1635         u32 misc_host_ctrl;
1636         u16 power_control, power_caps;
1637         int pm = tp->pm_cap;
1638
1639         /* Make sure register accesses (indirect or otherwise)
1640          * will function correctly.
1641          */
1642         pci_write_config_dword(tp->pdev,
1643                                TG3PCI_MISC_HOST_CTRL,
1644                                tp->misc_host_ctrl);
1645
1646         pci_read_config_word(tp->pdev,
1647                              pm + PCI_PM_CTRL,
1648                              &power_control);
1649         power_control |= PCI_PM_CTRL_PME_STATUS;
1650         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1651         switch (state) {
1652         case PCI_D0:
1653                 power_control |= 0;
1654                 pci_write_config_word(tp->pdev,
1655                                       pm + PCI_PM_CTRL,
1656                                       power_control);
1657                 udelay(100);    /* Delay after power state change */
1658
1659                 /* Switch out of Vaux if it is a NIC */
1660                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1661                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1662
1663                 return 0;
1664
1665         case PCI_D1:
1666                 power_control |= 1;
1667                 break;
1668
1669         case PCI_D2:
1670                 power_control |= 2;
1671                 break;
1672
1673         case PCI_D3hot:
1674                 power_control |= 3;
1675                 break;
1676
1677         default:
1678                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1679                        "requested.\n",
1680                        tp->dev->name, state);
1681                 return -EINVAL;
1682         }
1683
1684         power_control |= PCI_PM_CTRL_PME_ENABLE;
1685
1686         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1687         tw32(TG3PCI_MISC_HOST_CTRL,
1688              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1689
1690         if (tp->link_config.phy_is_low_power == 0) {
1691                 tp->link_config.phy_is_low_power = 1;
1692                 tp->link_config.orig_speed = tp->link_config.speed;
1693                 tp->link_config.orig_duplex = tp->link_config.duplex;
1694                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1695         }
1696
1697         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1698                 tp->link_config.speed = SPEED_10;
1699                 tp->link_config.duplex = DUPLEX_HALF;
1700                 tp->link_config.autoneg = AUTONEG_ENABLE;
1701                 tg3_setup_phy(tp, 0);
1702         }
1703
1704         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1705                 u32 val;
1706
1707                 val = tr32(GRC_VCPU_EXT_CTRL);
1708                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1709         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1710                 int i;
1711                 u32 val;
1712
1713                 for (i = 0; i < 200; i++) {
1714                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1715                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1716                                 break;
1717                         msleep(1);
1718                 }
1719         }
1720         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1721                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1722                                                      WOL_DRV_STATE_SHUTDOWN |
1723                                                      WOL_DRV_WOL |
1724                                                      WOL_SET_MAGIC_PKT);
1725
1726         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1727
1728         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1729                 u32 mac_mode;
1730
1731                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1732                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1733                         udelay(40);
1734
1735                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1736                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1737                         else
1738                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1739
1740                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1741                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1742                             ASIC_REV_5700) {
1743                                 u32 speed = (tp->tg3_flags &
1744                                              TG3_FLAG_WOL_SPEED_100MB) ?
1745                                              SPEED_100 : SPEED_10;
1746                                 if (tg3_5700_link_polarity(tp, speed))
1747                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1748                                 else
1749                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1750                         }
1751                 } else {
1752                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1753                 }
1754
1755                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1756                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1757
1758                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1759                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1760                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1761
1762                 tw32_f(MAC_MODE, mac_mode);
1763                 udelay(100);
1764
1765                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1766                 udelay(10);
1767         }
1768
1769         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1770             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1771              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1772                 u32 base_val;
1773
1774                 base_val = tp->pci_clock_ctrl;
1775                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1776                              CLOCK_CTRL_TXCLK_DISABLE);
1777
1778                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1779                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1780         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1781                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1782                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1783                 /* do nothing */
1784         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1785                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1786                 u32 newbits1, newbits2;
1787
1788                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1789                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1790                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1791                                     CLOCK_CTRL_TXCLK_DISABLE |
1792                                     CLOCK_CTRL_ALTCLK);
1793                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1794                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1795                         newbits1 = CLOCK_CTRL_625_CORE;
1796                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1797                 } else {
1798                         newbits1 = CLOCK_CTRL_ALTCLK;
1799                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1800                 }
1801
1802                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1803                             40);
1804
1805                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1806                             40);
1807
1808                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1809                         u32 newbits3;
1810
1811                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1812                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1813                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1814                                             CLOCK_CTRL_TXCLK_DISABLE |
1815                                             CLOCK_CTRL_44MHZ_CORE);
1816                         } else {
1817                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1818                         }
1819
1820                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1821                                     tp->pci_clock_ctrl | newbits3, 40);
1822                 }
1823         }
1824
1825         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1826             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1827             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1828                 tg3_power_down_phy(tp);
1829
1830         tg3_frob_aux_power(tp);
1831
1832         /* Workaround for unstable PLL clock */
1833         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1834             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1835                 u32 val = tr32(0x7d00);
1836
1837                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1838                 tw32(0x7d00, val);
1839                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1840                         int err;
1841
1842                         err = tg3_nvram_lock(tp);
1843                         tg3_halt_cpu(tp, RX_CPU_BASE);
1844                         if (!err)
1845                                 tg3_nvram_unlock(tp);
1846                 }
1847         }
1848
1849         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1850
1851         /* Finally, set the new power state. */
1852         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1853         udelay(100);    /* Delay after power state change */
1854
1855         return 0;
1856 }
1857
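     /* Translate the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
      * DUPLEX_* values.  Unknown encodings map to SPEED_INVALID, except on
      * the 5906 where separate status bits are decoded instead.
      */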
1858 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1859 {
1860         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1861         case MII_TG3_AUX_STAT_10HALF:
1862                 *speed = SPEED_10;
1863                 *duplex = DUPLEX_HALF;
1864                 break;
1865
1866         case MII_TG3_AUX_STAT_10FULL:
1867                 *speed = SPEED_10;
1868                 *duplex = DUPLEX_FULL;
1869                 break;
1870
1871         case MII_TG3_AUX_STAT_100HALF:
1872                 *speed = SPEED_100;
1873                 *duplex = DUPLEX_HALF;
1874                 break;
1875
1876         case MII_TG3_AUX_STAT_100FULL:
1877                 *speed = SPEED_100;
1878                 *duplex = DUPLEX_FULL;
1879                 break;
1880
1881         case MII_TG3_AUX_STAT_1000HALF:
1882                 *speed = SPEED_1000;
1883                 *duplex = DUPLEX_HALF;
1884                 break;
1885
1886         case MII_TG3_AUX_STAT_1000FULL:
1887                 *speed = SPEED_1000;
1888                 *duplex = DUPLEX_FULL;
1889                 break;
1890
1891         default:
1892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1893                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1894                                  SPEED_10;
1895                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1896                                   DUPLEX_HALF;
1897                         break;
1898                 }
1899                 *speed = SPEED_INVALID;
1900                 *duplex = DUPLEX_INVALID;
1901                 break;
1902         }
1903 }
1904
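     /* Program the advertisement registers of a copper PHY and restart
      * autonegotiation, or force the configured speed/duplex when autoneg
      * is disabled.
      */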
1905 static void tg3_phy_copper_begin(struct tg3 *tp)
1906 {
1907         u32 new_adv;
1908         int i;
1909
1910         if (tp->link_config.phy_is_low_power) {
1911                 /* Entering low power mode.  Disable gigabit and
1912                  * 100baseT advertisements.
1913                  */
1914                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1915
1916                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1917                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1918                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1919                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1920
1921                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1922         } else if (tp->link_config.speed == SPEED_INVALID) {
1923                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1924                         tp->link_config.advertising &=
1925                                 ~(ADVERTISED_1000baseT_Half |
1926                                   ADVERTISED_1000baseT_Full);
1927
1928                 new_adv = ADVERTISE_CSMA;
1929                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1930                         new_adv |= ADVERTISE_10HALF;
1931                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1932                         new_adv |= ADVERTISE_10FULL;
1933                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1934                         new_adv |= ADVERTISE_100HALF;
1935                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1936                         new_adv |= ADVERTISE_100FULL;
1937
1938                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1939
1940                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1941
1942                 if (tp->link_config.advertising &
1943                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1944                         new_adv = 0;
1945                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1946                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1947                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1948                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1949                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1950                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1951                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1952                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1953                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1954                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1955                 } else {
1956                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1957                 }
1958         } else {
1959                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1960                 new_adv |= ADVERTISE_CSMA;
1961
1962                 /* Asking for a specific link mode. */
1963                 if (tp->link_config.speed == SPEED_1000) {
1964                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1965
1966                         if (tp->link_config.duplex == DUPLEX_FULL)
1967                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1968                         else
1969                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1970                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1971                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1972                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1973                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1974                 } else {
1975                         if (tp->link_config.speed == SPEED_100) {
1976                                 if (tp->link_config.duplex == DUPLEX_FULL)
1977                                         new_adv |= ADVERTISE_100FULL;
1978                                 else
1979                                         new_adv |= ADVERTISE_100HALF;
1980                         } else {
1981                                 if (tp->link_config.duplex == DUPLEX_FULL)
1982                                         new_adv |= ADVERTISE_10FULL;
1983                                 else
1984                                         new_adv |= ADVERTISE_10HALF;
1985                         }
1986                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1987
1988                         new_adv = 0;
1989                 }
1990
1991                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1992         }
1993
1994         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1995             tp->link_config.speed != SPEED_INVALID) {
1996                 u32 bmcr, orig_bmcr;
1997
1998                 tp->link_config.active_speed = tp->link_config.speed;
1999                 tp->link_config.active_duplex = tp->link_config.duplex;
2000
2001                 bmcr = 0;
2002                 switch (tp->link_config.speed) {
2003                 default:
2004                 case SPEED_10:
2005                         break;
2006
2007                 case SPEED_100:
2008                         bmcr |= BMCR_SPEED100;
2009                         break;
2010
2011                 case SPEED_1000:
2012                         bmcr |= TG3_BMCR_SPEED1000;
2013                         break;
2014                 }
2015
2016                 if (tp->link_config.duplex == DUPLEX_FULL)
2017                         bmcr |= BMCR_FULLDPLX;
2018
2019                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2020                     (bmcr != orig_bmcr)) {
2021                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2022                         for (i = 0; i < 1500; i++) {
2023                                 u32 tmp;
2024
2025                                 udelay(10);
2026                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2027                                     tg3_readphy(tp, MII_BMSR, &tmp))
2028                                         continue;
2029                                 if (!(tmp & BMSR_LSTATUS)) {
2030                                         udelay(40);
2031                                         break;
2032                                 }
2033                         }
2034                         tg3_writephy(tp, MII_BMCR, bmcr);
2035                         udelay(40);
2036                 }
2037         } else {
2038                 tg3_writephy(tp, MII_BMCR,
2039                              BMCR_ANENABLE | BMCR_ANRESTART);
2040         }
2041 }
2042
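     /* One-time DSP setup writes needed by the BCM5401 PHY. */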
2043 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2044 {
2045         int err;
2046
2047         /* Turn off tap power management. */
2048         /* Set Extended packet length bit */
2049         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2050
2051         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2052         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2053
2054         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2055         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2056
2057         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2058         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2059
2060         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2061         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2062
2063         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2064         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2065
2066         udelay(40);
2067
2068         return err;
2069 }
2070
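     /* Return 1 if the PHY is currently advertising every mode requested
      * in @mask, 0 if any mode is missing or a register read fails.
      */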
2071 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2072 {
2073         u32 adv_reg, all_mask = 0;
2074
2075         if (mask & ADVERTISED_10baseT_Half)
2076                 all_mask |= ADVERTISE_10HALF;
2077         if (mask & ADVERTISED_10baseT_Full)
2078                 all_mask |= ADVERTISE_10FULL;
2079         if (mask & ADVERTISED_100baseT_Half)
2080                 all_mask |= ADVERTISE_100HALF;
2081         if (mask & ADVERTISED_100baseT_Full)
2082                 all_mask |= ADVERTISE_100FULL;
2083
2084         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2085                 return 0;
2086
2087         if ((adv_reg & all_mask) != all_mask)
2088                 return 0;
2089         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2090                 u32 tg3_ctrl;
2091
2092                 all_mask = 0;
2093                 if (mask & ADVERTISED_1000baseT_Half)
2094                         all_mask |= ADVERTISE_1000HALF;
2095                 if (mask & ADVERTISED_1000baseT_Full)
2096                         all_mask |= ADVERTISE_1000FULL;
2097
2098                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2099                         return 0;
2100
2101                 if ((tg3_ctrl & all_mask) != all_mask)
2102                         return 0;
2103         }
2104         return 1;
2105 }
2106
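     /* Check that the advertised pause bits match the requested flow
      * control.  On a full-duplex link a mismatch fails the link check;
      * otherwise the advertisement is just reprogrammed so the next
      * negotiation gets it right.
      */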
2107 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2108 {
2109         u32 curadv, reqadv;
2110
2111         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2112                 return 1;
2113
2114         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2115         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2116
2117         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2118                 if (curadv != reqadv)
2119                         return 0;
2120
2121                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2122                         tg3_readphy(tp, MII_LPA, rmtadv);
2123         } else {
2124                 /* Reprogram the advertisement register, even if it
2125                  * does not affect the current link.  If the link
2126                  * gets renegotiated in the future, we can save an
2127                  * additional renegotiation cycle by advertising
2128                  * it correctly in the first place.
2129                  */
2130                 if (curadv != reqadv) {
2131                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2132                                      ADVERTISE_PAUSE_ASYM);
2133                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2134                 }
2135         }
2136
2137         return 1;
2138 }
2139
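     /* (Re)establish the link on a copper PHY: apply chip- and PHY-
      * specific workarounds, poll for link, work out the active speed and
      * duplex, then bring the MAC mode and carrier state in line.
      */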
2140 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2141 {
2142         int current_link_up;
2143         u32 bmsr, dummy;
2144         u32 lcl_adv, rmt_adv;
2145         u16 current_speed;
2146         u8 current_duplex;
2147         int i, err;
2148
2149         tw32(MAC_EVENT, 0);
2150
2151         tw32_f(MAC_STATUS,
2152              (MAC_STATUS_SYNC_CHANGED |
2153               MAC_STATUS_CFG_CHANGED |
2154               MAC_STATUS_MI_COMPLETION |
2155               MAC_STATUS_LNKSTATE_CHANGED));
2156         udelay(40);
2157
2158         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2159                 tw32_f(MAC_MI_MODE,
2160                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2161                 udelay(80);
2162         }
2163
2164         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2165
2166         /* Some third-party PHYs need to be reset on link going
2167          * down.
2168          */
2169         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2170              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2171              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2172             netif_carrier_ok(tp->dev)) {
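                     /* The link-status bit in BMSR latches low; read the
                      * register twice to see the current state.
                      */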
2173                 tg3_readphy(tp, MII_BMSR, &bmsr);
2174                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2175                     !(bmsr & BMSR_LSTATUS))
2176                         force_reset = 1;
2177         }
2178         if (force_reset)
2179                 tg3_phy_reset(tp);
2180
2181         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2182                 tg3_readphy(tp, MII_BMSR, &bmsr);
2183                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2184                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2185                         bmsr = 0;
2186
2187                 if (!(bmsr & BMSR_LSTATUS)) {
2188                         err = tg3_init_5401phy_dsp(tp);
2189                         if (err)
2190                                 return err;
2191
2192                         tg3_readphy(tp, MII_BMSR, &bmsr);
2193                         for (i = 0; i < 1000; i++) {
2194                                 udelay(10);
2195                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2196                                     (bmsr & BMSR_LSTATUS)) {
2197                                         udelay(40);
2198                                         break;
2199                                 }
2200                         }
2201
2202                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2203                             !(bmsr & BMSR_LSTATUS) &&
2204                             tp->link_config.active_speed == SPEED_1000) {
2205                                 err = tg3_phy_reset(tp);
2206                                 if (!err)
2207                                         err = tg3_init_5401phy_dsp(tp);
2208                                 if (err)
2209                                         return err;
2210                         }
2211                 }
2212         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2213                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2214                 /* 5701 {A0,B0} CRC bug workaround */
2215                 tg3_writephy(tp, 0x15, 0x0a75);
2216                 tg3_writephy(tp, 0x1c, 0x8c68);
2217                 tg3_writephy(tp, 0x1c, 0x8d68);
2218                 tg3_writephy(tp, 0x1c, 0x8c68);
2219         }
2220
2221         /* Clear pending interrupts... */
2222         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2223         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2224
2225         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2226                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2227         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2228                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2229
2230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2232                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2233                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2234                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2235                 else
2236                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2237         }
2238
2239         current_link_up = 0;
2240         current_speed = SPEED_INVALID;
2241         current_duplex = DUPLEX_INVALID;
2242
2243         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2244                 u32 val;
2245
2246                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2247                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2248                 if (!(val & (1 << 10))) {
2249                         val |= (1 << 10);
2250                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2251                         goto relink;
2252                 }
2253         }
2254
2255         bmsr = 0;
2256         for (i = 0; i < 100; i++) {
2257                 tg3_readphy(tp, MII_BMSR, &bmsr);
2258                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2259                     (bmsr & BMSR_LSTATUS))
2260                         break;
2261                 udelay(40);
2262         }
2263
2264         if (bmsr & BMSR_LSTATUS) {
2265                 u32 aux_stat, bmcr;
2266
2267                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2268                 for (i = 0; i < 2000; i++) {
2269                         udelay(10);
2270                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2271                             aux_stat)
2272                                 break;
2273                 }
2274
2275                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2276                                              &current_speed,
2277                                              &current_duplex);
2278
2279                 bmcr = 0;
2280                 for (i = 0; i < 200; i++) {
2281                         tg3_readphy(tp, MII_BMCR, &bmcr);
2282                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2283                                 continue;
2284                         if (bmcr && bmcr != 0x7fff)
2285                                 break;
2286                         udelay(10);
2287                 }
2288
2289                 lcl_adv = 0;
2290                 rmt_adv = 0;
2291
2292                 tp->link_config.active_speed = current_speed;
2293                 tp->link_config.active_duplex = current_duplex;
2294
2295                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2296                         if ((bmcr & BMCR_ANENABLE) &&
2297                             tg3_copper_is_advertising_all(tp,
2298                                                 tp->link_config.advertising)) {
2299                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2300                                                                   &rmt_adv))
2301                                         current_link_up = 1;
2302                         }
2303                 } else {
2304                         if (!(bmcr & BMCR_ANENABLE) &&
2305                             tp->link_config.speed == current_speed &&
2306                             tp->link_config.duplex == current_duplex &&
2307                             tp->link_config.flowctrl ==
2308                             tp->link_config.active_flowctrl) {
2309                                 current_link_up = 1;
2310                         }
2311                 }
2312
2313                 if (current_link_up == 1 &&
2314                     tp->link_config.active_duplex == DUPLEX_FULL)
2315                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2316         }
2317
2318 relink:
2319         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2320                 u32 tmp;
2321
2322                 tg3_phy_copper_begin(tp);
2323
2324                 tg3_readphy(tp, MII_BMSR, &tmp);
2325                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2326                     (tmp & BMSR_LSTATUS))
2327                         current_link_up = 1;
2328         }
2329
2330         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2331         if (current_link_up == 1) {
2332                 if (tp->link_config.active_speed == SPEED_100 ||
2333                     tp->link_config.active_speed == SPEED_10)
2334                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2335                 else
2336                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2337         } else
2338                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2339
2340         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2341         if (tp->link_config.active_duplex == DUPLEX_HALF)
2342                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2343
2344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2345                 if (current_link_up == 1 &&
2346                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2347                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2348                 else
2349                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2350         }
2351
2352         /* ??? Without this setting Netgear GA302T PHY does not
2353          * ??? send/receive packets...
2354          */
2355         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2356             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2357                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2358                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2359                 udelay(80);
2360         }
2361
2362         tw32_f(MAC_MODE, tp->mac_mode);
2363         udelay(40);
2364
2365         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2366                 /* Polled via timer. */
2367                 tw32_f(MAC_EVENT, 0);
2368         } else {
2369                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2370         }
2371         udelay(40);
2372
2373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2374             current_link_up == 1 &&
2375             tp->link_config.active_speed == SPEED_1000 &&
2376             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2377              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2378                 udelay(120);
2379                 tw32_f(MAC_STATUS,
2380                      (MAC_STATUS_SYNC_CHANGED |
2381                       MAC_STATUS_CFG_CHANGED));
2382                 udelay(40);
2383                 tg3_write_mem(tp,
2384                               NIC_SRAM_FIRMWARE_MBOX,
2385                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2386         }
2387
2388         if (current_link_up != netif_carrier_ok(tp->dev)) {
2389                 if (current_link_up)
2390                         netif_carrier_on(tp->dev);
2391                 else
2392                         netif_carrier_off(tp->dev);
2393                 tg3_link_report(tp);
2394         }
2395
2396         return 0;
2397 }
2398
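     /* Bookkeeping for the software 1000BASE-X autonegotiation state
      * machine; the states and MR_* flags follow the IEEE 802.3z
      * (clause 37) arbitration model.
      */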
2399 struct tg3_fiber_aneginfo {
2400         int state;
2401 #define ANEG_STATE_UNKNOWN              0
2402 #define ANEG_STATE_AN_ENABLE            1
2403 #define ANEG_STATE_RESTART_INIT         2
2404 #define ANEG_STATE_RESTART              3
2405 #define ANEG_STATE_DISABLE_LINK_OK      4
2406 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2407 #define ANEG_STATE_ABILITY_DETECT       6
2408 #define ANEG_STATE_ACK_DETECT_INIT      7
2409 #define ANEG_STATE_ACK_DETECT           8
2410 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2411 #define ANEG_STATE_COMPLETE_ACK         10
2412 #define ANEG_STATE_IDLE_DETECT_INIT     11
2413 #define ANEG_STATE_IDLE_DETECT          12
2414 #define ANEG_STATE_LINK_OK              13
2415 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2416 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2417
2418         u32 flags;
2419 #define MR_AN_ENABLE            0x00000001
2420 #define MR_RESTART_AN           0x00000002
2421 #define MR_AN_COMPLETE          0x00000004
2422 #define MR_PAGE_RX              0x00000008
2423 #define MR_NP_LOADED            0x00000010
2424 #define MR_TOGGLE_TX            0x00000020
2425 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2426 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2427 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2428 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2429 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2430 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2431 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2432 #define MR_TOGGLE_RX            0x00002000
2433 #define MR_NP_RX                0x00004000
2434
2435 #define MR_LINK_OK              0x80000000
2436
2437         unsigned long link_time, cur_time;
2438
2439         u32 ability_match_cfg;
2440         int ability_match_count;
2441
2442         char ability_match, idle_match, ack_match;
2443
2444         u32 txconfig, rxconfig;
2445 #define ANEG_CFG_NP             0x00000080
2446 #define ANEG_CFG_ACK            0x00000040
2447 #define ANEG_CFG_RF2            0x00000020
2448 #define ANEG_CFG_RF1            0x00000010
2449 #define ANEG_CFG_PS2            0x00000001
2450 #define ANEG_CFG_PS1            0x00008000
2451 #define ANEG_CFG_HD             0x00004000
2452 #define ANEG_CFG_FD             0x00002000
2453 #define ANEG_CFG_INVAL          0x00001f06
2454
2455 };
2456 #define ANEG_OK         0
2457 #define ANEG_DONE       1
2458 #define ANEG_TIMER_ENAB 2
2459 #define ANEG_FAILED     -1
2460
2461 #define ANEG_STATE_SETTLE_TIME  10000
2462
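     /* Advance the software autoneg state machine by one step.  The
      * received configuration word is sampled from MAC_RX_AUTO_NEG, and
      * the return value is one of ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or
      * ANEG_FAILED.
      */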
2463 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2464                                    struct tg3_fiber_aneginfo *ap)
2465 {
2466         u16 flowctrl;
2467         unsigned long delta;
2468         u32 rx_cfg_reg;
2469         int ret;
2470
2471         if (ap->state == ANEG_STATE_UNKNOWN) {
2472                 ap->rxconfig = 0;
2473                 ap->link_time = 0;
2474                 ap->cur_time = 0;
2475                 ap->ability_match_cfg = 0;
2476                 ap->ability_match_count = 0;
2477                 ap->ability_match = 0;
2478                 ap->idle_match = 0;
2479                 ap->ack_match = 0;
2480         }
2481         ap->cur_time++;
2482
2483         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2484                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2485
2486                 if (rx_cfg_reg != ap->ability_match_cfg) {
2487                         ap->ability_match_cfg = rx_cfg_reg;
2488                         ap->ability_match = 0;
2489                         ap->ability_match_count = 0;
2490                 } else {
2491                         if (++ap->ability_match_count > 1) {
2492                                 ap->ability_match = 1;
2493                                 ap->ability_match_cfg = rx_cfg_reg;
2494                         }
2495                 }
2496                 if (rx_cfg_reg & ANEG_CFG_ACK)
2497                         ap->ack_match = 1;
2498                 else
2499                         ap->ack_match = 0;
2500
2501                 ap->idle_match = 0;
2502         } else {
2503                 ap->idle_match = 1;
2504                 ap->ability_match_cfg = 0;
2505                 ap->ability_match_count = 0;
2506                 ap->ability_match = 0;
2507                 ap->ack_match = 0;
2508
2509                 rx_cfg_reg = 0;
2510         }
2511
2512         ap->rxconfig = rx_cfg_reg;
2513         ret = ANEG_OK;
2514
2515         switch (ap->state) {
2516         case ANEG_STATE_UNKNOWN:
2517                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2518                         ap->state = ANEG_STATE_AN_ENABLE;
2519
2520                 /* fallthru */
2521         case ANEG_STATE_AN_ENABLE:
2522                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2523                 if (ap->flags & MR_AN_ENABLE) {
2524                         ap->link_time = 0;
2525                         ap->cur_time = 0;
2526                         ap->ability_match_cfg = 0;
2527                         ap->ability_match_count = 0;
2528                         ap->ability_match = 0;
2529                         ap->idle_match = 0;
2530                         ap->ack_match = 0;
2531
2532                         ap->state = ANEG_STATE_RESTART_INIT;
2533                 } else {
2534                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2535                 }
2536                 break;
2537
2538         case ANEG_STATE_RESTART_INIT:
2539                 ap->link_time = ap->cur_time;
2540                 ap->flags &= ~(MR_NP_LOADED);
2541                 ap->txconfig = 0;
2542                 tw32(MAC_TX_AUTO_NEG, 0);
2543                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2544                 tw32_f(MAC_MODE, tp->mac_mode);
2545                 udelay(40);
2546
2547                 ret = ANEG_TIMER_ENAB;
2548                 ap->state = ANEG_STATE_RESTART;
2549
2550                 /* fallthru */
2551         case ANEG_STATE_RESTART:
2552                 delta = ap->cur_time - ap->link_time;
2553                 if (delta > ANEG_STATE_SETTLE_TIME) {
2554                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2555                 } else {
2556                         ret = ANEG_TIMER_ENAB;
2557                 }
2558                 break;
2559
2560         case ANEG_STATE_DISABLE_LINK_OK:
2561                 ret = ANEG_DONE;
2562                 break;
2563
2564         case ANEG_STATE_ABILITY_DETECT_INIT:
2565                 ap->flags &= ~(MR_TOGGLE_TX);
2566                 ap->txconfig = ANEG_CFG_FD;
2567                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2568                 if (flowctrl & ADVERTISE_1000XPAUSE)
2569                         ap->txconfig |= ANEG_CFG_PS1;
2570                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2571                         ap->txconfig |= ANEG_CFG_PS2;
2572                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2573                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2574                 tw32_f(MAC_MODE, tp->mac_mode);
2575                 udelay(40);
2576
2577                 ap->state = ANEG_STATE_ABILITY_DETECT;
2578                 break;
2579
2580         case ANEG_STATE_ABILITY_DETECT:
2581                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2582                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2583                 }
2584                 break;
2585
2586         case ANEG_STATE_ACK_DETECT_INIT:
2587                 ap->txconfig |= ANEG_CFG_ACK;
2588                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2589                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2590                 tw32_f(MAC_MODE, tp->mac_mode);
2591                 udelay(40);
2592
2593                 ap->state = ANEG_STATE_ACK_DETECT;
2594
2595                 /* fallthru */
2596         case ANEG_STATE_ACK_DETECT:
2597                 if (ap->ack_match != 0) {
2598                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2599                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2600                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2601                         } else {
2602                                 ap->state = ANEG_STATE_AN_ENABLE;
2603                         }
2604                 } else if (ap->ability_match != 0 &&
2605                            ap->rxconfig == 0) {
2606                         ap->state = ANEG_STATE_AN_ENABLE;
2607                 }
2608                 break;
2609
2610         case ANEG_STATE_COMPLETE_ACK_INIT:
2611                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2612                         ret = ANEG_FAILED;
2613                         break;
2614                 }
2615                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2616                                MR_LP_ADV_HALF_DUPLEX |
2617                                MR_LP_ADV_SYM_PAUSE |
2618                                MR_LP_ADV_ASYM_PAUSE |
2619                                MR_LP_ADV_REMOTE_FAULT1 |
2620                                MR_LP_ADV_REMOTE_FAULT2 |
2621                                MR_LP_ADV_NEXT_PAGE |
2622                                MR_TOGGLE_RX |
2623                                MR_NP_RX);
2624                 if (ap->rxconfig & ANEG_CFG_FD)
2625                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2626                 if (ap->rxconfig & ANEG_CFG_HD)
2627                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2628                 if (ap->rxconfig & ANEG_CFG_PS1)
2629                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2630                 if (ap->rxconfig & ANEG_CFG_PS2)
2631                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2632                 if (ap->rxconfig & ANEG_CFG_RF1)
2633                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2634                 if (ap->rxconfig & ANEG_CFG_RF2)
2635                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2636                 if (ap->rxconfig & ANEG_CFG_NP)
2637                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2638
2639                 ap->link_time = ap->cur_time;
2640
2641                 ap->flags ^= (MR_TOGGLE_TX);
2642                 if (ap->rxconfig & 0x0008)
2643                         ap->flags |= MR_TOGGLE_RX;
2644                 if (ap->rxconfig & ANEG_CFG_NP)
2645                         ap->flags |= MR_NP_RX;
2646                 ap->flags |= MR_PAGE_RX;
2647
2648                 ap->state = ANEG_STATE_COMPLETE_ACK;
2649                 ret = ANEG_TIMER_ENAB;
2650                 break;
2651
2652         case ANEG_STATE_COMPLETE_ACK:
2653                 if (ap->ability_match != 0 &&
2654                     ap->rxconfig == 0) {
2655                         ap->state = ANEG_STATE_AN_ENABLE;
2656                         break;
2657                 }
2658                 delta = ap->cur_time - ap->link_time;
2659                 if (delta > ANEG_STATE_SETTLE_TIME) {
2660                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2661                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2662                         } else {
2663                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2664                                     !(ap->flags & MR_NP_RX)) {
2665                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2666                                 } else {
2667                                         ret = ANEG_FAILED;
2668                                 }
2669                         }
2670                 }
2671                 break;
2672
2673         case ANEG_STATE_IDLE_DETECT_INIT:
2674                 ap->link_time = ap->cur_time;
2675                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2676                 tw32_f(MAC_MODE, tp->mac_mode);
2677                 udelay(40);
2678
2679                 ap->state = ANEG_STATE_IDLE_DETECT;
2680                 ret = ANEG_TIMER_ENAB;
2681                 break;
2682
2683         case ANEG_STATE_IDLE_DETECT:
2684                 if (ap->ability_match != 0 &&
2685                     ap->rxconfig == 0) {
2686                         ap->state = ANEG_STATE_AN_ENABLE;
2687                         break;
2688                 }
2689                 delta = ap->cur_time - ap->link_time;
2690                 if (delta > ANEG_STATE_SETTLE_TIME) {
2691                         /* XXX another gem from the Broadcom driver :( */
2692                         ap->state = ANEG_STATE_LINK_OK;
2693                 }
2694                 break;
2695
2696         case ANEG_STATE_LINK_OK:
2697                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2698                 ret = ANEG_DONE;
2699                 break;
2700
2701         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2702                 /* ??? unimplemented */
2703                 break;
2704
2705         case ANEG_STATE_NEXT_PAGE_WAIT:
2706                 /* ??? unimplemented */
2707                 break;
2708
2709         default:
2710                 ret = ANEG_FAILED;
2711                 break;
2712         }
2713
2714         return ret;
2715 }
2716
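     /* Run the software autoneg state machine to completion, one
      * microsecond per tick for at most ~195 ms.  The negotiated tx/rx
      * config words are returned through @txflags and @rxflags; the
      * return value is 1 when negotiation finished usefully.
      */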
2717 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2718 {
2719         int res = 0;
2720         struct tg3_fiber_aneginfo aninfo;
2721         int status = ANEG_FAILED;
2722         unsigned int tick;
2723         u32 tmp;
2724
2725         tw32_f(MAC_TX_AUTO_NEG, 0);
2726
2727         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2728         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2729         udelay(40);
2730
2731         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2732         udelay(40);
2733
2734         memset(&aninfo, 0, sizeof(aninfo));
2735         aninfo.flags |= MR_AN_ENABLE;
2736         aninfo.state = ANEG_STATE_UNKNOWN;
2737         aninfo.cur_time = 0;
2738         tick = 0;
2739         while (++tick < 195000) {
2740                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2741                 if (status == ANEG_DONE || status == ANEG_FAILED)
2742                         break;
2743
2744                 udelay(1);
2745         }
2746
2747         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2748         tw32_f(MAC_MODE, tp->mac_mode);
2749         udelay(40);
2750
2751         *txflags = aninfo.txconfig;
2752         *rxflags = aninfo.flags;
2753
2754         if (status == ANEG_DONE &&
2755             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2756                              MR_LP_ADV_FULL_DUPLEX)))
2757                 res = 1;
2758
2759         return res;
2760 }
2761
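     /* Bring up the BCM8002 SERDES PHY via its vendor-specific registers:
      * set the PLL lock range, soft-reset it, configure auto-lock and
      * comdet, and cycle POR.
      */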
2762 static void tg3_init_bcm8002(struct tg3 *tp)
2763 {
2764         u32 mac_status = tr32(MAC_STATUS);
2765         int i;
2766
2767         /* Reset when initializing the first time or when we have a link. */
2768         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2769             !(mac_status & MAC_STATUS_PCS_SYNCED))
2770                 return;
2771
2772         /* Set PLL lock range. */
2773         tg3_writephy(tp, 0x16, 0x8007);
2774
2775         /* SW reset */
2776         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2777
2778         /* Wait for reset to complete. */
2779         /* XXX schedule_timeout() ... */
2780         for (i = 0; i < 500; i++)
2781                 udelay(10);
2782
2783         /* Config mode; select PMA/Ch 1 regs. */
2784         tg3_writephy(tp, 0x10, 0x8411);
2785
2786         /* Enable auto-lock and comdet, select txclk for tx. */
2787         tg3_writephy(tp, 0x11, 0x0a10);
2788
2789         tg3_writephy(tp, 0x18, 0x00a0);
2790         tg3_writephy(tp, 0x16, 0x41ff);
2791
2792         /* Assert and deassert POR. */
2793         tg3_writephy(tp, 0x13, 0x0400);
2794         udelay(40);
2795         tg3_writephy(tp, 0x13, 0x0000);
2796
2797         tg3_writephy(tp, 0x11, 0x0a50);
2798         udelay(40);
2799         tg3_writephy(tp, 0x11, 0x0a10);
2800
2801         /* Wait for signal to stabilize */
2802         /* XXX schedule_timeout() ... */
2803         for (i = 0; i < 15000; i++)
2804                 udelay(10);
2805
2806         /* Deselect the channel register so we can read the PHYID
2807          * later.
2808          */
2809         tg3_writephy(tp, 0x10, 0x8011);
2810 }
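
/* Timing note (informational): the two XXX busy-wait loops above spin
 * for 500 * 10 us ~= 5 ms and 15000 * 10 us ~= 150 ms respectively.
 * The schedule_timeout() markers presumably hint that a sleeping wait
 * such as msleep(5) / msleep(150) would do in a sleepable context;
 * whether this path may sleep has not been verified here.
 */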
2811
2812 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2813 {
2814         u16 flowctrl;
2815         u32 sg_dig_ctrl, sg_dig_status;
2816         u32 serdes_cfg, expected_sg_dig_ctrl;
2817         int workaround, port_a;
2818         int current_link_up;
2819
2820         serdes_cfg = 0;
2821         expected_sg_dig_ctrl = 0;
2822         workaround = 0;
2823         port_a = 1;
2824         current_link_up = 0;
2825
2826         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2827             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2828                 workaround = 1;
2829                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2830                         port_a = 0;
2831
2832                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2833                 /* preserve bits 20-23 for voltage regulator */
2834                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2835         }
2836
2837         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2838
2839         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2840                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2841                         if (workaround) {
2842                                 u32 val = serdes_cfg;
2843
2844                                 if (port_a)
2845                                         val |= 0xc010000;
2846                                 else
2847                                         val |= 0x4010000;
2848                                 tw32_f(MAC_SERDES_CFG, val);
2849                         }
2850
2851                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2852                 }
2853                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2854                         tg3_setup_flow_control(tp, 0, 0);
2855                         current_link_up = 1;
2856                 }
2857                 goto out;
2858         }
2859
2860         /* Want auto-negotiation.  */
2861         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2862
2863         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2864         if (flowctrl & ADVERTISE_1000XPAUSE)
2865                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2866         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2867                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2868
2869         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2870                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2871                     tp->serdes_counter &&
2872                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2873                                     MAC_STATUS_RCVD_CFG)) ==
2874                      MAC_STATUS_PCS_SYNCED)) {
2875                         tp->serdes_counter--;
2876                         current_link_up = 1;
2877                         goto out;
2878                 }
2879 restart_autoneg:
2880                 if (workaround)
2881                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2882                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2883                 udelay(5);
2884                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2885
2886                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2887                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2888         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2889                                  MAC_STATUS_SIGNAL_DET)) {
2890                 sg_dig_status = tr32(SG_DIG_STATUS);
2891                 mac_status = tr32(MAC_STATUS);
2892
2893                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2894                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2895                         u32 local_adv = 0, remote_adv = 0;
2896
2897                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2898                                 local_adv |= ADVERTISE_1000XPAUSE;
2899                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2900                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2901
2902                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2903                                 remote_adv |= LPA_1000XPAUSE;
2904                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2905                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2906
2907                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2908                         current_link_up = 1;
2909                         tp->serdes_counter = 0;
2910                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2911                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2912                         if (tp->serdes_counter)
2913                                 tp->serdes_counter--;
2914                         else {
2915                                 if (workaround) {
2916                                         u32 val = serdes_cfg;
2917
2918                                         if (port_a)
2919                                                 val |= 0xc010000;
2920                                         else
2921                                                 val |= 0x4010000;
2922
2923                                         tw32_f(MAC_SERDES_CFG, val);
2924                                 }
2925
2926                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2927                                 udelay(40);
2928
2929                                 /* Parallel detection: link is up only
2930                                  * if we have PCS_SYNC and are not
2931                                  * receiving config code words. */
2932                                 mac_status = tr32(MAC_STATUS);
2933                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2934                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2935                                         tg3_setup_flow_control(tp, 0, 0);
2936                                         current_link_up = 1;
2937                                         tp->tg3_flags2 |=
2938                                                 TG3_FLG2_PARALLEL_DETECT;
2939                                         tp->serdes_counter =
2940                                                 SERDES_PARALLEL_DET_TIMEOUT;
2941                                 } else
2942                                         goto restart_autoneg;
2943                         }
2944                 }
2945         } else {
2946                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2947                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2948         }
2949
2950 out:
2951         return current_link_up;
2952 }
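
/* Informational recap of the pause-bit translation above: local
 * SG_DIG_PAUSE_CAP / SG_DIG_ASYM_PAUSE become ADVERTISE_1000XPAUSE /
 * ADVERTISE_1000XPSE_ASYM, and partner SG_DIG_PARTNER_PAUSE_CAPABLE /
 * SG_DIG_PARTNER_ASYM_PAUSE become LPA_1000XPAUSE /
 * LPA_1000XPAUSE_ASYM, before both sides are handed to
 * tg3_setup_flow_control().
 */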
2953
2954 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2955 {
2956         int current_link_up = 0;
2957
2958         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2959                 goto out;
2960
2961         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2962                 u32 txflags, rxflags;
2963                 int i;
2964
2965                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2966                         u32 local_adv = 0, remote_adv = 0;
2967
2968                         if (txflags & ANEG_CFG_PS1)
2969                                 local_adv |= ADVERTISE_1000XPAUSE;
2970                         if (txflags & ANEG_CFG_PS2)
2971                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2972
2973                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
2974                                 remote_adv |= LPA_1000XPAUSE;
2975                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2976                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2977
2978                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2979
2980                         current_link_up = 1;
2981                 }
2982                 for (i = 0; i < 30; i++) {
2983                         udelay(20);
2984                         tw32_f(MAC_STATUS,
2985                                (MAC_STATUS_SYNC_CHANGED |
2986                                 MAC_STATUS_CFG_CHANGED));
2987                         udelay(40);
2988                         if ((tr32(MAC_STATUS) &
2989                              (MAC_STATUS_SYNC_CHANGED |
2990                               MAC_STATUS_CFG_CHANGED)) == 0)
2991                                 break;
2992                 }
2993
2994                 mac_status = tr32(MAC_STATUS);
2995                 if (current_link_up == 0 &&
2996                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2997                     !(mac_status & MAC_STATUS_RCVD_CFG))
2998                         current_link_up = 1;
2999         } else {
3000                 tg3_setup_flow_control(tp, 0, 0);
3001
3002                 /* Forcing 1000FD link up. */
3003                 current_link_up = 1;
3004
3005                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3006                 udelay(40);
3007
3008                 tw32_f(MAC_MODE, tp->mac_mode);
3009                 udelay(40);
3010         }
3011
3012 out:
3013         return current_link_up;
3014 }
3015
3016 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3017 {
3018         u32 orig_pause_cfg;
3019         u16 orig_active_speed;
3020         u8 orig_active_duplex;
3021         u32 mac_status;
3022         int current_link_up;
3023         int i;
3024
3025         orig_pause_cfg = tp->link_config.active_flowctrl;
3026         orig_active_speed = tp->link_config.active_speed;
3027         orig_active_duplex = tp->link_config.active_duplex;
3028
3029         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3030             netif_carrier_ok(tp->dev) &&
3031             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3032                 mac_status = tr32(MAC_STATUS);
3033                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3034                                MAC_STATUS_SIGNAL_DET |
3035                                MAC_STATUS_CFG_CHANGED |
3036                                MAC_STATUS_RCVD_CFG);
3037                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3038                                    MAC_STATUS_SIGNAL_DET)) {
3039                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3040                                             MAC_STATUS_CFG_CHANGED));
3041                         return 0;
3042                 }
3043         }
3044
3045         tw32_f(MAC_TX_AUTO_NEG, 0);
3046
3047         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3048         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3049         tw32_f(MAC_MODE, tp->mac_mode);
3050         udelay(40);
3051
3052         if (tp->phy_id == PHY_ID_BCM8002)
3053                 tg3_init_bcm8002(tp);
3054
3055         /* Enable link change event even when serdes polling.  */
3056         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3057         udelay(40);
3058
3059         current_link_up = 0;
3060         mac_status = tr32(MAC_STATUS);
3061
3062         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3063                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3064         else
3065                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3066
3067         tp->hw_status->status =
3068                 (SD_STATUS_UPDATED |
3069                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3070
3071         for (i = 0; i < 100; i++) {
3072                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3073                                     MAC_STATUS_CFG_CHANGED));
3074                 udelay(5);
3075                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3076                                          MAC_STATUS_CFG_CHANGED |
3077                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3078                         break;
3079         }
3080
3081         mac_status = tr32(MAC_STATUS);
3082         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3083                 current_link_up = 0;
3084                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3085                     tp->serdes_counter == 0) {
3086                         tw32_f(MAC_MODE, (tp->mac_mode |
3087                                           MAC_MODE_SEND_CONFIGS));
3088                         udelay(1);
3089                         tw32_f(MAC_MODE, tp->mac_mode);
3090                 }
3091         }
3092
3093         if (current_link_up == 1) {
3094                 tp->link_config.active_speed = SPEED_1000;
3095                 tp->link_config.active_duplex = DUPLEX_FULL;
3096                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3097                                     LED_CTRL_LNKLED_OVERRIDE |
3098                                     LED_CTRL_1000MBPS_ON));
3099         } else {
3100                 tp->link_config.active_speed = SPEED_INVALID;
3101                 tp->link_config.active_duplex = DUPLEX_INVALID;
3102                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3103                                     LED_CTRL_LNKLED_OVERRIDE |
3104                                     LED_CTRL_TRAFFIC_OVERRIDE));
3105         }
3106
3107         if (current_link_up != netif_carrier_ok(tp->dev)) {
3108                 if (current_link_up)
3109                         netif_carrier_on(tp->dev);
3110                 else
3111                         netif_carrier_off(tp->dev);
3112                 tg3_link_report(tp);
3113         } else {
3114                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3115                 if (orig_pause_cfg != now_pause_cfg ||
3116                     orig_active_speed != tp->link_config.active_speed ||
3117                     orig_active_duplex != tp->link_config.active_duplex)
3118                         tg3_link_report(tp);
3119         }
3120
3121         return 0;
3122 }
3123
3124 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3125 {
3126         int current_link_up, err = 0;
3127         u32 bmsr, bmcr;
3128         u16 current_speed;
3129         u8 current_duplex;
3130         u32 local_adv, remote_adv;
3131
3132         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3133         tw32_f(MAC_MODE, tp->mac_mode);
3134         udelay(40);
3135
3136         tw32(MAC_EVENT, 0);
3137
3138         tw32_f(MAC_STATUS,
3139              (MAC_STATUS_SYNC_CHANGED |
3140               MAC_STATUS_CFG_CHANGED |
3141               MAC_STATUS_MI_COMPLETION |
3142               MAC_STATUS_LNKSTATE_CHANGED));
3143         udelay(40);
3144
3145         if (force_reset)
3146                 tg3_phy_reset(tp);
3147
3148         current_link_up = 0;
3149         current_speed = SPEED_INVALID;
3150         current_duplex = DUPLEX_INVALID;
3151
3152         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3153         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3155                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3156                         bmsr |= BMSR_LSTATUS;
3157                 else
3158                         bmsr &= ~BMSR_LSTATUS;
3159         }
3160
3161         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3162
3163         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3164             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3165              tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3166                 /* do nothing, just check for link up at the end */
3167         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3168                 u32 adv, new_adv;
3169
3170                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3171                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3172                                   ADVERTISE_1000XPAUSE |
3173                                   ADVERTISE_1000XPSE_ASYM |
3174                                   ADVERTISE_SLCT);
3175
3176                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3177
3178                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3179                         new_adv |= ADVERTISE_1000XHALF;
3180                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3181                         new_adv |= ADVERTISE_1000XFULL;
3182
3183                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3184                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3185                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3186                         tg3_writephy(tp, MII_BMCR, bmcr);
3187
3188                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3189                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3190                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3191
3192                         return err;
3193                 }
3194         } else {
3195                 u32 new_bmcr;
3196
3197                 bmcr &= ~BMCR_SPEED1000;
3198                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3199
3200                 if (tp->link_config.duplex == DUPLEX_FULL)
3201                         new_bmcr |= BMCR_FULLDPLX;
3202
3203                 if (new_bmcr != bmcr) {
3204                         /* BMCR_SPEED1000 is a reserved bit that needs
3205                          * to be set on write.
3206                          */
3207                         new_bmcr |= BMCR_SPEED1000;
3208
3209                         /* Force a linkdown */
3210                         if (netif_carrier_ok(tp->dev)) {
3211                                 u32 adv;
3212
3213                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3214                                 adv &= ~(ADVERTISE_1000XFULL |
3215                                          ADVERTISE_1000XHALF |
3216                                          ADVERTISE_SLCT);
3217                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3218                                 tg3_writephy(tp, MII_BMCR, bmcr |
3219                                                            BMCR_ANRESTART |
3220                                                            BMCR_ANENABLE);
3221                                 udelay(10);
3222                                 netif_carrier_off(tp->dev);
3223                         }
3224                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3225                         bmcr = new_bmcr;
3226                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3227                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3229                             ASIC_REV_5714) {
3230                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3231                                         bmsr |= BMSR_LSTATUS;
3232                                 else
3233                                         bmsr &= ~BMSR_LSTATUS;
3234                         }
3235                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3236                 }
3237         }
3238
3239         if (bmsr & BMSR_LSTATUS) {
3240                 current_speed = SPEED_1000;
3241                 current_link_up = 1;
3242                 if (bmcr & BMCR_FULLDPLX)
3243                         current_duplex = DUPLEX_FULL;
3244                 else
3245                         current_duplex = DUPLEX_HALF;
3246
3247                 local_adv = 0;
3248                 remote_adv = 0;
3249
3250                 if (bmcr & BMCR_ANENABLE) {
3251                         u32 common;
3252
3253                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3254                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3255                         common = local_adv & remote_adv;
3256                         if (common & (ADVERTISE_1000XHALF |
3257                                       ADVERTISE_1000XFULL)) {
3258                                 if (common & ADVERTISE_1000XFULL)
3259                                         current_duplex = DUPLEX_FULL;
3260                                 else
3261                                         current_duplex = DUPLEX_HALF;
3262                         } else
3263                                 current_link_up = 0;
3265                 }
3266         }
3267
3268         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3269                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3270
3271         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3272         if (tp->link_config.active_duplex == DUPLEX_HALF)
3273                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3274
3275         tw32_f(MAC_MODE, tp->mac_mode);
3276         udelay(40);
3277
3278         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3279
3280         tp->link_config.active_speed = current_speed;
3281         tp->link_config.active_duplex = current_duplex;
3282
3283         if (current_link_up != netif_carrier_ok(tp->dev)) {
3284                 if (current_link_up)
3285                         netif_carrier_on(tp->dev);
3286                 else {
3287                         netif_carrier_off(tp->dev);
3288                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3289                 }
3290                 tg3_link_report(tp);
3291         }
3292         return err;
3293 }
3294
3295 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3296 {
3297         if (tp->serdes_counter) {
3298                 /* Give autoneg time to complete. */
3299                 tp->serdes_counter--;
3300                 return;
3301         }
3302         if (!netif_carrier_ok(tp->dev) &&
3303             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3304                 u32 bmcr;
3305
3306                 tg3_readphy(tp, MII_BMCR, &bmcr);
3307                 if (bmcr & BMCR_ANENABLE) {
3308                         u32 phy1, phy2;
3309
3310                         /* Select shadow register 0x1f */
3311                         tg3_writephy(tp, 0x1c, 0x7c00);
3312                         tg3_readphy(tp, 0x1c, &phy1);
3313
3314                         /* Select expansion interrupt status register */
3315                         tg3_writephy(tp, 0x17, 0x0f01);
3316                         tg3_readphy(tp, 0x15, &phy2);
3317                         tg3_readphy(tp, 0x15, &phy2);
3318
3319                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3320                                 /* We have signal detect and not receiving
3321                                  * config code words, link is up by parallel
3322                                  * detection.
3323                                  */
3324
3325                                 bmcr &= ~BMCR_ANENABLE;
3326                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3327                                 tg3_writephy(tp, MII_BMCR, bmcr);
3328                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3329                         }
3330                 }
3331         } else if (netif_carrier_ok(tp->dev) &&
3332                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3333                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3335                 u32 phy2;
3336
3337                 /* Select expansion interrupt status register */
3338                 tg3_writephy(tp, 0x17, 0x0f01);
3339                 tg3_readphy(tp, 0x15, &phy2);
3340                 if (phy2 & 0x20) {
3341                         u32 bmcr;
3342
3343                         /* Config code words received, turn on autoneg. */
3344                         tg3_readphy(tp, MII_BMCR, &bmcr);
3345                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3346
3347                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3349                 }
3350         }
3351 }
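
/* Illustrative sketch (hypothetical helper, not used by the driver):
 * the expansion interrupt status register consulted by
 * tg3_serdes_parallel_detect() above is reached indirectly -- write a
 * selector to PHY register 0x17, then read 0x15.  The double read
 * mirrors the code above, which apparently discards a first, latched
 * value.
 */
static inline void tg3_example_read_serdes_expansion(struct tg3 *tp,
						     u32 *val)
{
	tg3_writephy(tp, 0x17, 0x0f01);	/* select expansion intr status */
	tg3_readphy(tp, 0x15, val);	/* first read (possibly stale) */
	tg3_readphy(tp, 0x15, val);	/* second read: current status */
}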
3352
3353 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3354 {
3355         int err;
3356
3357         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3358                 err = tg3_setup_fiber_phy(tp, force_reset);
3359         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3360                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3361         } else {
3362                 err = tg3_setup_copper_phy(tp, force_reset);
3363         }
3364
3365         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3366             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3367                 u32 val, scale;
3368
3369                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3370                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3371                         scale = 65;
3372                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3373                         scale = 6;
3374                 else
3375                         scale = 12;
3376
3377                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3378                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3379                 tw32(GRC_MISC_CFG, val);
3380         }
3381
3382         if (tp->link_config.active_speed == SPEED_1000 &&
3383             tp->link_config.active_duplex == DUPLEX_HALF)
3384                 tw32(MAC_TX_LENGTHS,
3385                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3386                       (6 << TX_LENGTHS_IPG_SHIFT) |
3387                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3388         else
3389                 tw32(MAC_TX_LENGTHS,
3390                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3391                       (6 << TX_LENGTHS_IPG_SHIFT) |
3392                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3393
3394         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3395                 if (netif_carrier_ok(tp->dev)) {
3396                         tw32(HOSTCC_STAT_COAL_TICKS,
3397                              tp->coal.stats_block_coalesce_usecs);
3398                 } else {
3399                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3400                 }
3401         }
3402
3403         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3404                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3405                 if (!netif_carrier_ok(tp->dev))
3406                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3407                               tp->pwrmgmt_thresh;
3408                 else
3409                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3410                 tw32(PCIE_PWR_MGMT_THRESH, val);
3411         }
3412
3413         return err;
3414 }
3415
3416 /* This is called whenever we suspect that the system chipset is re-
3417  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3418  * is bogus tx completions. We try to recover by setting the
3419  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3420  * in the workqueue.
3421  */
3422 static void tg3_tx_recover(struct tg3 *tp)
3423 {
3424         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3425                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3426
3427         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3428                "mapped I/O cycles to the network device, attempting to "
3429                "recover. Please report the problem to the driver maintainer "
3430                "and include system chipset information.\n", tp->dev->name);
3431
3432         spin_lock(&tp->lock);
3433         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3434         spin_unlock(&tp->lock);
3435 }
3436
3437 static inline u32 tg3_tx_avail(struct tg3 *tp)
3438 {
3439         smp_mb();
3440         return (tp->tx_pending -
3441                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3442 }
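
/* Worked example (illustrative only): since TG3_TX_RING_SIZE is a
 * power of two, the in-flight count above is a mask rather than a
 * modulo, and unsigned wrap-around comes for free.  With prod == 5
 * and cons == 510, (5 - 510) & 511 == 7, i.e. entries 510, 511, 0,
 * 1, 2, 3, 4 are outstanding.  A hypothetical helper spelling out
 * just that arithmetic:
 */
static inline u32 tg3_example_tx_inflight(u32 prod, u32 cons)
{
	return (prod - cons) & (TG3_TX_RING_SIZE - 1);
}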
3443
3444 /* Tigon3 never reports partial packet sends.  So we do not
3445  * need special logic to handle SKBs that have not had all
3446  * of their frags sent yet, like SunGEM does.
3447  */
3448 static void tg3_tx(struct tg3 *tp)
3449 {
3450         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3451         u32 sw_idx = tp->tx_cons;
3452
3453         while (sw_idx != hw_idx) {
3454                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3455                 struct sk_buff *skb = ri->skb;
3456                 int i, tx_bug = 0;
3457
3458                 if (unlikely(skb == NULL)) {
3459                         tg3_tx_recover(tp);
3460                         return;
3461                 }
3462
3463                 pci_unmap_single(tp->pdev,
3464                                  pci_unmap_addr(ri, mapping),
3465                                  skb_headlen(skb),
3466                                  PCI_DMA_TODEVICE);
3467
3468                 ri->skb = NULL;
3469
3470                 sw_idx = NEXT_TX(sw_idx);
3471
3472                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3473                         ri = &tp->tx_buffers[sw_idx];
3474                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3475                                 tx_bug = 1;
3476
3477                         pci_unmap_page(tp->pdev,
3478                                        pci_unmap_addr(ri, mapping),
3479                                        skb_shinfo(skb)->frags[i].size,
3480                                        PCI_DMA_TODEVICE);
3481
3482                         sw_idx = NEXT_TX(sw_idx);
3483                 }
3484
3485                 dev_kfree_skb(skb);
3486
3487                 if (unlikely(tx_bug)) {
3488                         tg3_tx_recover(tp);
3489                         return;
3490                 }
3491         }
3492
3493         tp->tx_cons = sw_idx;
3494
3495         /* Need to make the tx_cons update visible to tg3_start_xmit()
3496          * before checking for netif_queue_stopped().  Without the
3497          * memory barrier, there is a small possibility that tg3_start_xmit()
3498          * will miss it and cause the queue to be stopped forever.
3499          */
3500         smp_mb();
3501
3502         if (unlikely(netif_queue_stopped(tp->dev) &&
3503                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3504                 netif_tx_lock(tp->dev);
3505                 if (netif_queue_stopped(tp->dev) &&
3506                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3507                         netif_wake_queue(tp->dev);
3508                 netif_tx_unlock(tp->dev);
3509         }
3510 }
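
/* Pairing sketch (informational; the real producer side lives in
 * tg3_start_xmit()): the smp_mb() above pairs with a barrier on the
 * transmit path, which after stopping the queue re-checks the ring,
 * roughly:
 *
 *	netif_stop_queue(dev);
 *	smp_mb();
 *	if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
 *		netif_wake_queue(dev);
 *
 * so that either the producer observes the new tx_cons, or this
 * completion path observes the stopped queue and wakes it.
 */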
3511
3512 /* Returns size of skb allocated or < 0 on error.
3513  *
3514  * We only need to fill in the address because the other members
3515  * of the RX descriptor are invariant, see tg3_init_rings.
3516  *
3517  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3518  * posting buffers we only dirty the first cache line of the RX
3519  * descriptor (containing the address).  Whereas for the RX status
3520  * buffers the cpu only reads the last cacheline of the RX descriptor
3521  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3522  */
3523 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3524                             int src_idx, u32 dest_idx_unmasked)
3525 {
3526         struct tg3_rx_buffer_desc *desc;
3527         struct ring_info *map, *src_map;
3528         struct sk_buff *skb;
3529         dma_addr_t mapping;
3530         int skb_size, dest_idx;
3531
3532         src_map = NULL;
3533         switch (opaque_key) {
3534         case RXD_OPAQUE_RING_STD:
3535                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3536                 desc = &tp->rx_std[dest_idx];
3537                 map = &tp->rx_std_buffers[dest_idx];
3538                 if (src_idx >= 0)
3539                         src_map = &tp->rx_std_buffers[src_idx];
3540                 skb_size = tp->rx_pkt_buf_sz;
3541                 break;
3542
3543         case RXD_OPAQUE_RING_JUMBO:
3544                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3545                 desc = &tp->rx_jumbo[dest_idx];
3546                 map = &tp->rx_jumbo_buffers[dest_idx];
3547                 if (src_idx >= 0)
3548                         src_map = &tp->rx_jumbo_buffers[src_idx];
3549                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3550                 break;
3551
3552         default:
3553                 return -EINVAL;
3554         }
3555
3556         /* Do not overwrite any of the map or rp information
3557          * until we are sure we can commit to a new buffer.
3558          *
3559          * Callers depend upon this behavior and assume that
3560          * we leave everything unchanged if we fail.
3561          */
3562         skb = netdev_alloc_skb(tp->dev, skb_size);
3563         if (skb == NULL)
3564                 return -ENOMEM;
3565
3566         skb_reserve(skb, tp->rx_offset);
3567
3568         mapping = pci_map_single(tp->pdev, skb->data,
3569                                  skb_size - tp->rx_offset,
3570                                  PCI_DMA_FROMDEVICE);
3571
3572         map->skb = skb;
3573         pci_unmap_addr_set(map, mapping, mapping);
3574
3575         if (src_map != NULL)
3576                 src_map->skb = NULL;
3577
3578         desc->addr_hi = ((u64)mapping >> 32);
3579         desc->addr_lo = ((u64)mapping & 0xffffffff);
3580
3581         return skb_size;
3582 }
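
/* Hardening sketch (assumption -- no such check exists here): the
 * pci_map_single() above is never checked for failure.  Where
 * pci_dma_mapping_error() is available, the error path could bail out
 * before the descriptor or map state is touched, roughly:
 *
 *	if (pci_dma_mapping_error(mapping)) {
 *		dev_kfree_skb(skb);
 *		return -EIO;
 *	}
 */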
3583
3584 /* We only need to copy the address over because the other
3585  * members of the RX descriptor are invariant.  See notes above
3586  * tg3_alloc_rx_skb for full details.
3587  */
3588 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3589                            int src_idx, u32 dest_idx_unmasked)
3590 {
3591         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3592         struct ring_info *src_map, *dest_map;
3593         int dest_idx;
3594
3595         switch (opaque_key) {
3596         case RXD_OPAQUE_RING_STD:
3597                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3598                 dest_desc = &tp->rx_std[dest_idx];
3599                 dest_map = &tp->rx_std_buffers[dest_idx];
3600                 src_desc = &tp->rx_std[src_idx];
3601                 src_map = &tp->rx_std_buffers[src_idx];
3602                 break;
3603
3604         case RXD_OPAQUE_RING_JUMBO:
3605                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3606                 dest_desc = &tp->rx_jumbo[dest_idx];
3607                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3608                 src_desc = &tp->rx_jumbo[src_idx];
3609                 src_map = &tp->rx_jumbo_buffers[src_idx];
3610                 break;
3611
3612         default:
3613                 return;
3614         }
3615
3616         dest_map->skb = src_map->skb;
3617         pci_unmap_addr_set(dest_map, mapping,
3618                            pci_unmap_addr(src_map, mapping));
3619         dest_desc->addr_hi = src_desc->addr_hi;
3620         dest_desc->addr_lo = src_desc->addr_lo;
3621
3622         src_map->skb = NULL;
3623 }
3624
3625 #if TG3_VLAN_TAG_USED
3626 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3627 {
3628         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3629 }
3630 #endif
3631
3632 /* The RX ring scheme is composed of multiple rings which post fresh
3633  * buffers to the chip, and one special ring the chip uses to report
3634  * status back to the host.
3635  *
3636  * The special ring reports the status of received packets to the
3637  * host.  The chip does not write into the original descriptor the
3638  * RX buffer was obtained from.  The chip simply takes the original
3639  * descriptor as provided by the host, updates the status and length
3640  * field, then writes this into the next status ring entry.
3641  *
3642  * Each ring the host uses to post buffers to the chip is described
3643  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3644  * it is first placed into the on-chip RAM.  When the packet's length
3645  * is known, it walks down the TG3_BDINFO entries to select the ring.
3646  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3647  * whose MAXLEN covers the new packet's length is chosen.
3648  *
3649  * The "separate ring for rx status" scheme may sound odd, but it makes
3650  * sense from a cache coherency perspective.  If only the host writes
3651  * to the buffer post rings, and only the chip writes to the rx status
3652  * rings, then cache lines never move beyond shared-modified state.
3653  * If both the host and chip were to write into the same ring, cache line
3654  * eviction could occur since both entities want it in an exclusive state.
3655  */
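
/* Rough picture of the scheme just described (illustrative only):
 *
 *	host --writes--> std/jumbo posting rings --reads--> chip
 *	chip --writes--> rx return (status) ring --reads--> host
 *
 * Each ring has exactly one writer, which is what keeps the cache
 * lines out of the exclusive-state ping-pong described above.
 */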
3656 static int tg3_rx(struct tg3 *tp, int budget)
3657 {
3658         u32 work_mask, rx_std_posted = 0;
3659         u32 sw_idx = tp->rx_rcb_ptr;
3660         u16 hw_idx;
3661         int received;
3662
3663         hw_idx = tp->hw_status->idx[0].rx_producer;
3664         /*
3665          * We need to order the read of hw_idx and the read of
3666          * the opaque cookie.
3667          */
3668         rmb();
3669         work_mask = 0;
3670         received = 0;
3671         while (sw_idx != hw_idx && budget > 0) {
3672                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3673                 unsigned int len;
3674                 struct sk_buff *skb;
3675                 dma_addr_t dma_addr;
3676                 u32 opaque_key, desc_idx, *post_ptr;
3677
3678                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3679                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3680                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3681                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3682                                                   mapping);
3683                         skb = tp->rx_std_buffers[desc_idx].skb;
3684                         post_ptr = &tp->rx_std_ptr;
3685                         rx_std_posted++;
3686                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3687                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3688                                                   mapping);
3689                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3690                         post_ptr = &tp->rx_jumbo_ptr;
3691                 } else {
3692                         goto next_pkt_nopost;
3693                 }
3695
3696                 work_mask |= opaque_key;
3697
3698                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3699                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3700                 drop_it:
3701                         tg3_recycle_rx(tp, opaque_key,
3702                                        desc_idx, *post_ptr);
3703                 drop_it_no_recycle:
3704                         /* The card keeps track of the other statistics. */
3705                         tp->net_stats.rx_dropped++;
3706                         goto next_pkt;
3707                 }
3708
3709                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit CRC */
3710
3711                 if (len > RX_COPY_THRESHOLD &&
3712                     tp->rx_offset == 2) {
3713                         /* rx_offset != 2 iff this is a 5701 card
3714                          * running in PCI-X mode; see
3715                          * tg3_get_invariants(). */
3716                         int skb_size;
3717
3718                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3719                                                     desc_idx, *post_ptr);
3720                         if (skb_size < 0)
3721                                 goto drop_it;
3722
3723                         pci_unmap_single(tp->pdev, dma_addr,
3724                                          skb_size - tp->rx_offset,
3725                                          PCI_DMA_FROMDEVICE);
3726
3727                         skb_put(skb, len);
3728                 } else {
3729                         struct sk_buff *copy_skb;
3730
3731                         tg3_recycle_rx(tp, opaque_key,
3732                                        desc_idx, *post_ptr);
3733
3734                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3735                         if (copy_skb == NULL)
3736                                 goto drop_it_no_recycle;
3737
3738                         skb_reserve(copy_skb, 2);
3739                         skb_put(copy_skb, len);
3740                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3741                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3742                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3743
3744                         /* We'll reuse the original ring buffer. */
3745                         skb = copy_skb;
3746                 }
3747
3748                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3749                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3750                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3751                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3752                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3753                 else
3754                         skb->ip_summed = CHECKSUM_NONE;
3755
3756                 skb->protocol = eth_type_trans(skb, tp->dev);
3757 #if TG3_VLAN_TAG_USED
3758                 if (tp->vlgrp != NULL &&
3759                     desc->type_flags & RXD_FLAG_VLAN) {
3760                         tg3_vlan_rx(tp, skb,
3761                                     desc->err_vlan & RXD_VLAN_MASK);
3762                 } else
3763 #endif
3764                         netif_receive_skb(skb);
3765
3766                 tp->dev->last_rx = jiffies;
3767                 received++;
3768                 budget--;
3769
3770 next_pkt:
3771                 (*post_ptr)++;
3772
3773                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3774                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3775
3776                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3777                                      TG3_64BIT_REG_LOW, idx);
3778                         work_mask &= ~RXD_OPAQUE_RING_STD;
3779                         rx_std_posted = 0;
3780                 }
3781 next_pkt_nopost:
3782                 sw_idx++;
3783                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3784
3785                 /* Refresh hw_idx to see if there is new work */
3786                 if (sw_idx == hw_idx) {
3787                         hw_idx = tp->hw_status->idx[0].rx_producer;
3788                         rmb();
3789                 }
3790         }
3791
3792         /* ACK the status ring. */
3793         tp->rx_rcb_ptr = sw_idx;
3794         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3795
3796         /* Refill RX ring(s). */
3797         if (work_mask & RXD_OPAQUE_RING_STD) {
3798                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3799                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3800                              sw_idx);
3801         }
3802         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3803                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3804                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3805                              sw_idx);
3806         }
3807         mmiowb();
3808
3809         return received;
3810 }
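
/* Informational: the producer indices (*post_ptr, tp->rx_std_ptr,
 * tp->rx_jumbo_ptr) are free-running and are only reduced modulo the
 * power-of-two ring sizes at the point they are written to the
 * mailboxes above, so the compiler can emit a mask instead of a
 * divide.
 */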
3811
3812 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3813 {
3814         struct tg3_hw_status *sblk = tp->hw_status;
3815
3816         /* handle link change and other phy events */
3817         if (!(tp->tg3_flags &
3818               (TG3_FLAG_USE_LINKCHG_REG |
3819                TG3_FLAG_POLL_SERDES))) {
3820                 if (sblk->status & SD_STATUS_LINK_CHG) {
3821                         sblk->status = SD_STATUS_UPDATED |
3822                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3823                         spin_lock(&tp->lock);
3824                         tg3_setup_phy(tp, 0);
3825                         spin_unlock(&tp->lock);
3826                 }
3827         }
3828
3829         /* run TX completion thread */
3830         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3831                 tg3_tx(tp);
3832                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3833                         return work_done;
3834         }
3835
3836         /* run RX thread, within the bounds set by NAPI.
3837          * All RX "locking" is done by ensuring outside
3838          * code synchronizes with tg3->napi.poll()
3839          */
3840         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3841                 work_done += tg3_rx(tp, budget - work_done);
3842
3843         return work_done;
3844 }
3845
3846 static int tg3_poll(struct napi_struct *napi, int budget)
3847 {
3848         struct tg3 *tp = container_of(napi, struct tg3, napi);
3849         int work_done = 0;
3850         struct tg3_hw_status *sblk = tp->hw_status;
3851
3852         while (1) {
3853                 work_done = tg3_poll_work(tp, work_done, budget);
3854
3855                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3856                         goto tx_recovery;
3857
3858                 if (unlikely(work_done >= budget))
3859                         break;
3860
3861                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3862                         /* tp->last_tag is used in tg3_restart_ints() below
3863                          * to tell the hw how much work has been processed,
3864                          * so we must read it before checking for more work.
3865                          */
3866                         tp->last_tag = sblk->status_tag;
3867                         rmb();
3868                 } else
3869                         sblk->status &= ~SD_STATUS_UPDATED;
3870
3871                 if (likely(!tg3_has_work(tp))) {
3872                         netif_rx_complete(tp->dev, napi);
3873                         tg3_restart_ints(tp);
3874                         break;
3875                 }
3876         }
3877
3878         return work_done;
3879
3880 tx_recovery:
3881         /* work_done is guaranteed to be less than budget. */
3882         netif_rx_complete(tp->dev, napi);
3883         schedule_work(&tp->reset_task);
3884         return work_done;
3885 }
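
/* NAPI contract recap (informational): tg3_poll() returns how much
 * work it did; consuming the whole budget keeps the device on the
 * poll list, while completing early via netif_rx_complete() is the
 * only point at which chip interrupts may be re-armed, which is what
 * tg3_restart_ints() does above.
 */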
3886
3887 static void tg3_irq_quiesce(struct tg3 *tp)
3888 {
3889         BUG_ON(tp->irq_sync);
3890
3891         tp->irq_sync = 1;
3892         smp_mb();
3893
3894         synchronize_irq(tp->pdev->irq);
3895 }
3896
3897 static inline int tg3_irq_sync(struct tg3 *tp)
3898 {
3899         return tp->irq_sync;
3900 }
3901
3902 /* Fully shut down all tg3 driver activity elsewhere in the system.
3903  * If irq_sync is non-zero, the IRQ handler is synchronized with as
3904  * well.  Most of the time this is not necessary, except when
3905  * shutting down the device.
3906  */
3907 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3908 {
3909         spin_lock_bh(&tp->lock);
3910         if (irq_sync)
3911                 tg3_irq_quiesce(tp);
3912 }
3913
3914 static inline void tg3_full_unlock(struct tg3 *tp)
3915 {
3916         spin_unlock_bh(&tp->lock);
3917 }
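
/* Typical usage of the pair above (illustrative sketch):
 *
 *	tg3_full_lock(tp, 1);	<-- irq_sync == 1 also quiesces the ISR
 *	... reconfigure or halt the chip ...
 *	tg3_full_unlock(tp);
 *
 * irq_sync is passed as 1 only on paths such as device shutdown,
 * where the IRQ handler itself must be idle.
 */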
3918
3919 /* One-shot MSI handler - the chip automatically disables the
3920  * interrupt after sending the MSI, so the driver doesn't have to.
3921  */
3922 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3923 {
3924         struct net_device *dev = dev_id;
3925         struct tg3 *tp = netdev_priv(dev);
3926
3927         prefetch(tp->hw_status);
3928         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3929
3930         if (likely(!tg3_irq_sync(tp)))
3931                 netif_rx_schedule(dev, &tp->napi);
3932
3933         return IRQ_HANDLED;
3934 }
3935
3936 /* MSI ISR - No need to check for interrupt sharing and no need to
3937  * flush status block and interrupt mailbox. PCI ordering rules
3938  * guarantee that MSI will arrive after the status block.
3939  */
3940 static irqreturn_t tg3_msi(int irq, void *dev_id)
3941 {
3942         struct net_device *dev = dev_id;
3943         struct tg3 *tp = netdev_priv(dev);
3944
3945         prefetch(tp->hw_status);
3946         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3947         /*
3948          * Writing any value to intr-mbox-0 clears PCI INTA# and
3949          * chip-internal interrupt pending events.
3950          * Writing non-zero to intr-mbox-0 additionally tells the
3951          * NIC to stop sending us irqs, engaging "in-intr-handler"
3952          * event coalescing.
3953          */
3954         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3955         if (likely(!tg3_irq_sync(tp)))
3956                 netif_rx_schedule(dev, &tp->napi);
3957
3958         return IRQ_RETVAL(1);
3959 }
3960
3961 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3962 {
3963         struct net_device *dev = dev_id;
3964         struct tg3 *tp = netdev_priv(dev);
3965         struct tg3_hw_status *sblk = tp->hw_status;
3966         unsigned int handled = 1;
3967
3968         /* In INTx mode, the interrupt can arrive at the CPU before
3969          * the status block posted prior to it becomes visible.
3970          * Reading the PCI State register will confirm whether the
3971          * interrupt is ours and will flush the status block.
3972          */
3973         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3974                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3975                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3976                         handled = 0;
3977                         goto out;
3978                 }
3979         }
3980
3981         /*
3982          * Writing any value to intr-mbox-0 clears PCI INTA# and
3983          * chip-internal interrupt pending events.
3984          * Writing non-zero to intr-mbox-0 additionally tells the
3985          * NIC to stop sending us irqs, engaging "in-intr-handler"
3986          * event coalescing.
3987          *
3988          * Flush the mailbox to de-assert the IRQ immediately to prevent
3989          * spurious interrupts.  The flush impacts performance but
3990          * excessive spurious interrupts can be worse in some cases.
3991          */
3992         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3993         if (tg3_irq_sync(tp))
3994                 goto out;
3995         sblk->status &= ~SD_STATUS_UPDATED;
3996         if (likely(tg3_has_work(tp))) {
3997                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3998                 netif_rx_schedule(dev, &tp->napi);
3999         } else {
4000                 /* No work, shared interrupt perhaps?  Re-enable
4001                  * interrupts, and flush that PCI write.
4002                  */
4003                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4004                                0x00000000);
4005         }
4006 out:
4007         return IRQ_RETVAL(handled);
4008 }
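
/* Illustrative helper (hypothetical, not used by the driver): the
 * interrupt mailbox protocol in the handlers above boils down to
 * "write non-zero to ack and mask, write zero to re-arm".
 */
static inline void tg3_example_intr_mbox(struct tg3 *tp, int reenable)
{
	/* Non-zero engages in-intr-handler event coalescing; the
	 * flushing write de-asserts INTA# immediately.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       reenable ? 0x00000000 : 0x00000001);
}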
4009
4010 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4011 {
4012         struct net_device *dev = dev_id;
4013         struct tg3 *tp = netdev_priv(dev);
4014         struct tg3_hw_status *sblk = tp->hw_status;
4015         unsigned int handled = 1;
4016
4017         /* In INTx mode, the interrupt can arrive at the CPU before
4018          * the status block posted prior to it becomes visible.
4019          * Reading the PCI State register will confirm whether the
4020          * interrupt is ours and will flush the status block.
4021          */
4022         if (unlikely(sblk->status_tag == tp->last_tag)) {
4023                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4024                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4025                         handled = 0;
4026                         goto out;
4027                 }
4028         }
4029
4030         /*
4031          * Writing any value to intr-mbox-0 clears PCI INTA# and
4032          * chip-internal interrupt pending events.
4033          * Writing non-zero to intr-mbox-0 additionally tells the
4034          * NIC to stop sending us irqs, engaging "in-intr-handler"
4035          * event coalescing.
4036          *
4037          * Flush the mailbox to de-assert the IRQ immediately to prevent
4038          * spurious interrupts.  The flush impacts performance but
4039          * excessive spurious interrupts can be worse in some cases.
4040          */
4041         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4042         if (tg3_irq_sync(tp))
4043                 goto out;
4044         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4045                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4046                 /* Update last_tag to mark that this status has been
4047                  * seen.  Because the interrupt may be shared, we may be
4048                  * racing with tg3_poll(), so only update last_tag
4049                  * if tg3_poll() is not scheduled.
4050                  */
4051                 tp->last_tag = sblk->status_tag;
4052                 __netif_rx_schedule(dev, &tp->napi);
4053         }
4054 out:
4055         return IRQ_RETVAL(handled);
4056 }
4057
4058 /* ISR for interrupt test */
4059 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4060 {
4061         struct net_device *dev = dev_id;
4062         struct tg3 *tp = netdev_priv(dev);
4063         struct tg3_hw_status *sblk = tp->hw_status;
4064
4065         if ((sblk->status & SD_STATUS_UPDATED) ||
4066             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4067                 tg3_disable_ints(tp);
4068                 return IRQ_RETVAL(1);
4069         }
4070         return IRQ_RETVAL(0);
4071 }
4072
4073 static int tg3_init_hw(struct tg3 *, int);
4074 static int tg3_halt(struct tg3 *, int, int);
4075
4076 /* Restart hardware after configuration changes, self-test, etc.
4077  * Invoked with tp->lock held.
4078  */
4079 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4080         __releases(tp->lock)
4081         __acquires(tp->lock)
4082 {
4083         int err;
4084
4085         err = tg3_init_hw(tp, reset_phy);
4086         if (err) {
4087                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4088                        "aborting.\n", tp->dev->name);
4089                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4090                 tg3_full_unlock(tp);
4091                 del_timer_sync(&tp->timer);
4092                 tp->irq_sync = 0;
4093                 napi_enable(&tp->napi);
4094                 dev_close(tp->dev);
4095                 tg3_full_lock(tp, 0);
4096         }
4097         return err;
4098 }
4099
4100 #ifdef CONFIG_NET_POLL_CONTROLLER
4101 static void tg3_poll_controller(struct net_device *dev)
4102 {
4103         struct tg3 *tp = netdev_priv(dev);
4104
4105         tg3_interrupt(tp->pdev->irq, dev);
4106 }
4107 #endif
4108
4109 static void tg3_reset_task(struct work_struct *work)
4110 {
4111         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4112         unsigned int restart_timer;
4113
4114         tg3_full_lock(tp, 0);
4115
4116         if (!netif_running(tp->dev)) {
4117                 tg3_full_unlock(tp);
4118                 return;
4119         }
4120
4121         tg3_full_unlock(tp);
4122
4123         tg3_netif_stop(tp);
4124
4125         tg3_full_lock(tp, 1);
4126
4127         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4128         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4129
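        /* TX recovery pending presumably means a prior hang was blamed
         * on reordered mailbox writes; switch to the flushing write
         * handlers and record the reorder quirk before reinitializing.
         */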
4130         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4131                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4132                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4133                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4134                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4135         }
4136
4137         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4138         if (tg3_init_hw(tp, 1))
4139                 goto out;
4140
4141         tg3_netif_start(tp);
4142
4143         if (restart_timer)
4144                 mod_timer(&tp->timer, jiffies + 1);
4145
4146 out:
4147         tg3_full_unlock(tp);
4148 }
4149
4150 static void tg3_dump_short_state(struct tg3 *tp)
4151 {
4152         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4153                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4154         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4155                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4156 }
4157
4158 static void tg3_tx_timeout(struct net_device *dev)
4159 {
4160         struct tg3 *tp = netdev_priv(dev);
4161
4162         if (netif_msg_tx_err(tp)) {
4163                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4164                        dev->name);
4165                 tg3_dump_short_state(tp);
4166         }
4167
4168         schedule_work(&tp->reset_task);
4169 }
4170
4171 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
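/* The check relies on 32-bit wraparound: only the low 32 bits of the
 * mapping matter, and a buffer is suspect when base + len + 8 wraps
 * past zero.  The 0xffffdcc0 cutoff sits 9024 bytes below 4GB,
 * presumably enough headroom for a maximum-size jumbo frame.  A worked
 * example with hypothetical values:
 *
 *	base = 0xfffff000, len = 0x1000:
 *	0xfffff000 + 0x1000 + 8 == 0x00000008 < base  =>  hit
 */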
4172 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4173 {
4174         u32 base = (u32) mapping & 0xffffffff;
4175
4176         return ((base > 0xffffdcc0) &&
4177                 (base + len + 8 < base));
4178 }
4179
4180 /* Test for DMA addresses > 40-bit */
4181 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4182                                           int len)
4183 {
4184 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4185         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4186                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4187         return 0;
4188 #else
4189         return 0;
4190 #endif
4191 }
4192
4193 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4194
4195 /* Work around 4GB and 40-bit hardware DMA bugs. */
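/* Strategy: copy the whole skb into a freshly allocated linear buffer
 * (realigning the headroom on 5701), map the copy instead, then unmap
 * and release the ring entries the original skb had claimed.  If the
 * copy also lands across a bad boundary, the packet is dropped.
 */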
4196 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4197                                        u32 last_plus_one, u32 *start,
4198                                        u32 base_flags, u32 mss)
4199 {
4200         struct sk_buff *new_skb;
4201         dma_addr_t new_addr = 0;
4202         u32 entry = *start;
4203         int i, ret = 0;
4204
4205         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4206                 new_skb = skb_copy(skb, GFP_ATOMIC);
4207         else {
4208                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4209
4210                 new_skb = skb_copy_expand(skb,
4211                                           skb_headroom(skb) + more_headroom,
4212                                           skb_tailroom(skb), GFP_ATOMIC);
4213         }
4214
4215         if (!new_skb) {
4216                 ret = -1;
4217         } else {
4218                 /* New SKB is guaranteed to be linear. */
4219                 entry = *start;
4220                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4221                                           PCI_DMA_TODEVICE);
4222                 /* Make sure new skb does not cross any 4G boundaries.
4223                  * Drop the packet if it does.
4224                  */
4225                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4226                         ret = -1;
4227                         dev_kfree_skb(new_skb);
4228                         new_skb = NULL;
4229                 } else {
4230                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4231                                     base_flags, 1 | (mss << 1));
4232                         *start = NEXT_TX(entry);
4233                 }
4234         }
4235
4236         /* Now clean up the sw ring entries. */
4237         i = 0;
4238         while (entry != last_plus_one) {
4239                 int len;
4240
4241                 if (i == 0)
4242                         len = skb_headlen(skb);
4243                 else
4244                         len = skb_shinfo(skb)->frags[i-1].size;
4245                 pci_unmap_single(tp->pdev,
4246                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4247                                  len, PCI_DMA_TODEVICE);
4248                 if (i == 0) {
4249                         tp->tx_buffers[entry].skb = new_skb;
4250                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4251                 } else {
4252                         tp->tx_buffers[entry].skb = NULL;
4253                 }
4254                 entry = NEXT_TX(entry);
4255                 i++;
4256         }
4257
4258         dev_kfree_skb(skb);
4259
4260         return ret;
4261 }
4262
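/* Fill one hardware TX descriptor.  The mss_and_is_end argument packs
 * two fields so call sites stay compact: bit 0 marks the last
 * descriptor of a packet, the remaining bits carry the MSS.  Callers
 * build it as, e.g.:
 *
 *	tg3_set_txd(tp, entry, mapping, len, base_flags,
 *		    (i == last) | (mss << 1));
 */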
4263 static void tg3_set_txd(struct tg3 *tp, int entry,
4264                         dma_addr_t mapping, int len, u32 flags,
4265                         u32 mss_and_is_end)
4266 {
4267         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4268         int is_end = (mss_and_is_end & 0x1);
4269         u32 mss = (mss_and_is_end >> 1);
4270         u32 vlan_tag = 0;
4271
4272         if (is_end)
4273                 flags |= TXD_FLAG_END;
4274         if (flags & TXD_FLAG_VLAN) {
4275                 vlan_tag = flags >> 16;
4276                 flags &= 0xffff;
4277         }
4278         vlan_tag |= (mss << TXD_MSS_SHIFT);
4279
4280         txd->addr_hi = ((u64) mapping >> 32);
4281         txd->addr_lo = ((u64) mapping & 0xffffffff);
4282         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4283         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4284 }
4285
4286 /* hard_start_xmit for devices that don't have any bugs and
4287  * support TG3_FLG2_HW_TSO_2 only.
4288  */
4289 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4290 {
4291         struct tg3 *tp = netdev_priv(dev);
4292         dma_addr_t mapping;
4293         u32 len, entry, base_flags, mss;
4294
4295         len = skb_headlen(skb);
4296
4297         /* We are running in BH disabled context with netif_tx_lock
4298          * and TX reclaim runs via tp->napi.poll inside of a software
4299          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4300          * no IRQ context deadlocks to worry about either.  Rejoice!
4301          */
4302         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4303                 if (!netif_queue_stopped(dev)) {
4304                         netif_stop_queue(dev);
4305
4306                         /* This is a hard error, log it. */
4307                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4308                                "queue awake!\n", dev->name);
4309                 }
4310                 return NETDEV_TX_BUSY;
4311         }
4312
4313         entry = tp->tx_prod;
4314         base_flags = 0;
4315         mss = 0;
4316         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4317                 int tcp_opt_len, ip_tcp_len;
4318
4319                 if (skb_header_cloned(skb) &&
4320                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4321                         dev_kfree_skb(skb);
4322                         goto out_unlock;
4323                 }
4324
4325                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4326                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4327                 else {
4328                         struct iphdr *iph = ip_hdr(skb);
4329
4330                         tcp_opt_len = tcp_optlen(skb);
4331                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4332
4333                         iph->check = 0;
4334                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4335                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4336                 }
4337
4338                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4339                                TXD_FLAG_CPU_POST_DMA);
4340
4341                 tcp_hdr(skb)->check = 0;
4342
4343         }
4344         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4345                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4346 #if TG3_VLAN_TAG_USED
4347         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4348                 base_flags |= (TXD_FLAG_VLAN |
4349                                (vlan_tx_tag_get(skb) << 16));
4350 #endif
4351
4352         /* Queue skb data, a.k.a. the main skb fragment. */
4353         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4354
4355         tp->tx_buffers[entry].skb = skb;
4356         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4357
4358         tg3_set_txd(tp, entry, mapping, len, base_flags,
4359                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4360
4361         entry = NEXT_TX(entry);
4362
4363         /* Now loop through additional data fragments, and queue them. */
4364         if (skb_shinfo(skb)->nr_frags > 0) {
4365                 unsigned int i, last;
4366
4367                 last = skb_shinfo(skb)->nr_frags - 1;
4368                 for (i = 0; i <= last; i++) {
4369                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4370
4371                         len = frag->size;
4372                         mapping = pci_map_page(tp->pdev,
4373                                                frag->page,
4374                                                frag->page_offset,
4375                                                len, PCI_DMA_TODEVICE);
4376
4377                         tp->tx_buffers[entry].skb = NULL;
4378                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4379
4380                         tg3_set_txd(tp, entry, mapping, len,
4381                                     base_flags, (i == last) | (mss << 1));
4382
4383                         entry = NEXT_TX(entry);
4384                 }
4385         }
4386
4387         /* Packets are ready, update Tx producer idx local and on card. */
4388         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4389
4390         tp->tx_prod = entry;
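        /* Stop the queue first, then re-check for space: if the reclaim
         * path freed descriptors in the meantime, wake the queue back up
         * instead of leaving it stalled.
         */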
4391         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4392                 netif_stop_queue(dev);
4393                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4394                         netif_wake_queue(tp->dev);
4395         }
4396
4397 out_unlock:
4398         mmiowb();
4399
4400         dev->trans_start = jiffies;
4401
4402         return NETDEV_TX_OK;
4403 }
4404
4405 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4406
4407 /* Use GSO to work around a rare TSO bug that may be triggered when the
4408  * TSO header is greater than 80 bytes.
4409  */
4410 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4411 {
4412         struct sk_buff *segs, *nskb;
4413
4414         /* Estimate the number of fragments in the worst case */
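        /* (The "* 3" is a conservative worst case, assuming each segment
         * emitted by skb_gso_segment() needs at most three descriptors.)
         */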
4415         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4416                 netif_stop_queue(tp->dev);
4417                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4418                         return NETDEV_TX_BUSY;
4419
4420                 netif_wake_queue(tp->dev);
4421         }
4422
4423         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4424         if (IS_ERR(segs))
4425                 goto tg3_tso_bug_end;
4426
4427         do {
4428                 nskb = segs;
4429                 segs = segs->next;
4430                 nskb->next = NULL;
4431                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4432         } while (segs);
4433
4434 tg3_tso_bug_end:
4435         dev_kfree_skb(skb);
4436
4437         return NETDEV_TX_OK;
4438 }
4439
4440 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4441  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4442  */
4443 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4444 {
4445         struct tg3 *tp = netdev_priv(dev);
4446         dma_addr_t mapping;
4447         u32 len, entry, base_flags, mss;
4448         int would_hit_hwbug;
4449
4450         len = skb_headlen(skb);
4451
4452         /* We are running in BH disabled context with netif_tx_lock
4453          * and TX reclaim runs via tp->napi.poll inside of a software
4454          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4455          * no IRQ context deadlocks to worry about either.  Rejoice!
4456          */
4457         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4458                 if (!netif_queue_stopped(dev)) {
4459                         netif_stop_queue(dev);
4460
4461                         /* This is a hard error, log it. */
4462                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4463                                "queue awake!\n", dev->name);
4464                 }
4465                 return NETDEV_TX_BUSY;
4466         }
4467
4468         entry = tp->tx_prod;
4469         base_flags = 0;
4470         if (skb->ip_summed == CHECKSUM_PARTIAL)
4471                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4472         mss = 0;
4473         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4474                 struct iphdr *iph;
4475                 int tcp_opt_len, ip_tcp_len, hdr_len;
4476
4477                 if (skb_header_cloned(skb) &&
4478                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4479                         dev_kfree_skb(skb);
4480                         goto out_unlock;
4481                 }
4482
4483                 tcp_opt_len = tcp_optlen(skb);
4484                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4485
4486                 hdr_len = ip_tcp_len + tcp_opt_len;
4487                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4488                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4489                         return (tg3_tso_bug(tp, skb));
4490
4491                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4492                                TXD_FLAG_CPU_POST_DMA);
4493
4494                 iph = ip_hdr(skb);
4495                 iph->check = 0;
4496                 iph->tot_len = htons(mss + hdr_len);
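                /* With hardware TSO the chip computes the TCP checksum
                 * itself, so the field is simply cleared; otherwise seed
                 * it with a pseudo-header checksum for the firmware TSO
                 * path to fold the payload into.
                 */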
4497                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4498                         tcp_hdr(skb)->check = 0;
4499                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4500                 } else
4501                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4502                                                                  iph->daddr, 0,
4503                                                                  IPPROTO_TCP,
4504                                                                  0);
4505
4506                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4507                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4508                         if (tcp_opt_len || iph->ihl > 5) {
4509                                 int tsflags;
4510
4511                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4512                                 mss |= (tsflags << 11);
4513                         }
4514                 } else {
4515                         if (tcp_opt_len || iph->ihl > 5) {
4516                                 int tsflags;
4517
4518                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4519                                 base_flags |= tsflags << 12;
4520                         }
4521                 }
4522         }
4523 #if TG3_VLAN_TAG_USED
4524         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4525                 base_flags |= (TXD_FLAG_VLAN |
4526                                (vlan_tx_tag_get(skb) << 16));
4527 #endif
4528
4529         /* Queue skb data, a.k.a. the main skb fragment. */
4530         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4531
4532         tp->tx_buffers[entry].skb = skb;
4533         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4534
4535         would_hit_hwbug = 0;
4536
4537         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4538                 would_hit_hwbug = 1;
4539         else if (tg3_4g_overflow_test(mapping, len))
4540                 would_hit_hwbug = 1;
4541
4542         tg3_set_txd(tp, entry, mapping, len, base_flags,
4543                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4544
4545         entry = NEXT_TX(entry);
4546
4547         /* Now loop through additional data fragments, and queue them. */
4548         if (skb_shinfo(skb)->nr_frags > 0) {
4549                 unsigned int i, last;
4550
4551                 last = skb_shinfo(skb)->nr_frags - 1;
4552                 for (i = 0; i <= last; i++) {
4553                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4554
4555                         len = frag->size;
4556                         mapping = pci_map_page(tp->pdev,
4557                                                frag->page,
4558                                                frag->page_offset,
4559                                                len, PCI_DMA_TODEVICE);
4560
4561                         tp->tx_buffers[entry].skb = NULL;
4562                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4563
4564                         if (tg3_4g_overflow_test(mapping, len))
4565                                 would_hit_hwbug = 1;
4566
4567                         if (tg3_40bit_overflow_test(tp, mapping, len))
4568                                 would_hit_hwbug = 1;
4569
4570                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4571                                 tg3_set_txd(tp, entry, mapping, len,
4572                                             base_flags, (i == last)|(mss << 1));
4573                         else
4574                                 tg3_set_txd(tp, entry, mapping, len,
4575                                             base_flags, (i == last));
4576
4577                         entry = NEXT_TX(entry);
4578                 }
4579         }
4580
4581         if (would_hit_hwbug) {
4582                 u32 last_plus_one = entry;
4583                 u32 start;
4584
4585                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4586                 start &= (TG3_TX_RING_SIZE - 1);
4587
4588                 /* If the workaround fails due to memory/mapping
4589                  * failure, silently drop this packet.
4590                  */
4591                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4592                                                 &start, base_flags, mss))
4593                         goto out_unlock;
4594
4595                 entry = start;
4596         }
4597
4598         /* Packets are ready, update Tx producer idx local and on card. */
4599         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4600
4601         tp->tx_prod = entry;
4602         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4603                 netif_stop_queue(dev);
4604                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4605                         netif_wake_queue(tp->dev);
4606         }
4607
4608 out_unlock:
4609         mmiowb();
4610
4611         dev->trans_start = jiffies;
4612
4613         return NETDEV_TX_OK;
4614 }
4615
4616 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4617                                int new_mtu)
4618 {
4619         dev->mtu = new_mtu;
4620
4621         if (new_mtu > ETH_DATA_LEN) {
4622                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4623                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4624                         ethtool_op_set_tso(dev, 0);
4625                 }
4626                 else
4627                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4628         } else {
4629                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4630                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4631                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4632         }
4633 }
4634
4635 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4636 {
4637         struct tg3 *tp = netdev_priv(dev);
4638         int err;
4639
4640         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4641                 return -EINVAL;
4642
4643         if (!netif_running(dev)) {
4644                 /* We'll just catch it later when the
4645                  * device is up'd.
4646                  */
4647                 tg3_set_mtu(dev, tp, new_mtu);
4648                 return 0;
4649         }
4650
4651         tg3_netif_stop(tp);
4652
4653         tg3_full_lock(tp, 1);
4654
4655         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4656
4657         tg3_set_mtu(dev, tp, new_mtu);
4658
4659         err = tg3_restart_hw(tp, 0);
4660
4661         if (!err)
4662                 tg3_netif_start(tp);
4663
4664         tg3_full_unlock(tp);
4665
4666         return err;
4667 }
4668
4669 /* Free up pending packets in all rx/tx rings.
4670  *
4671  * The chip has been shut down and the driver detached from
4672  * the networking, so no interrupts or new tx packets will
4673  * end up in the driver.  tp->{tx,}lock is not held and we are not
4674  * in an interrupt context and thus may sleep.
4675  */
4676 static void tg3_free_rings(struct tg3 *tp)
4677 {
4678         struct ring_info *rxp;
4679         int i;
4680
4681         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4682                 rxp = &tp->rx_std_buffers[i];
4683
4684                 if (rxp->skb == NULL)
4685                         continue;
4686                 pci_unmap_single(tp->pdev,
4687                                  pci_unmap_addr(rxp, mapping),
4688                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4689                                  PCI_DMA_FROMDEVICE);
4690                 dev_kfree_skb_any(rxp->skb);
4691                 rxp->skb = NULL;
4692         }
4693
4694         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4695                 rxp = &tp->rx_jumbo_buffers[i];
4696
4697                 if (rxp->skb == NULL)
4698                         continue;
4699                 pci_unmap_single(tp->pdev,
4700                                  pci_unmap_addr(rxp, mapping),
4701                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4702                                  PCI_DMA_FROMDEVICE);
4703                 dev_kfree_skb_any(rxp->skb);
4704                 rxp->skb = NULL;
4705         }
4706
4707         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4708                 struct tx_ring_info *txp;
4709                 struct sk_buff *skb;
4710                 int j;
4711
4712                 txp = &tp->tx_buffers[i];
4713                 skb = txp->skb;
4714
4715                 if (skb == NULL) {
4716                         i++;
4717                         continue;
4718                 }
4719
4720                 pci_unmap_single(tp->pdev,
4721                                  pci_unmap_addr(txp, mapping),
4722                                  skb_headlen(skb),
4723                                  PCI_DMA_TODEVICE);
4724                 txp->skb = NULL;
4725
4726                 i++;
4727
4728                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4729                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4730                         pci_unmap_page(tp->pdev,
4731                                        pci_unmap_addr(txp, mapping),
4732                                        skb_shinfo(skb)->frags[j].size,
4733                                        PCI_DMA_TODEVICE);
4734                         i++;
4735                 }
4736
4737                 dev_kfree_skb_any(skb);
4738         }
4739 }
4740
4741 /* Initialize tx/rx rings for packet processing.
4742  *
4743  * The chip has been shut down and the driver detached from
4744  * the networking, so no interrupts or new tx packets will
4745  * end up in the driver.  tp->{tx,}lock are held and thus
4746  * we may not sleep.
4747  */
4748 static int tg3_init_rings(struct tg3 *tp)
4749 {
4750         u32 i;
4751
4752         /* Free up all the SKBs. */
4753         tg3_free_rings(tp);
4754
4755         /* Zero out all descriptors. */
4756         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4757         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4758         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4759         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4760
4761         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4762         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4763             (tp->dev->mtu > ETH_DATA_LEN))
4764                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4765
4766         /* Initialize invariants of the rings; we only set this
4767          * stuff once.  This works because the card does not
4768          * write into the rx buffer posting rings.
4769          */
4770         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4771                 struct tg3_rx_buffer_desc *rxd;
4772
4773                 rxd = &tp->rx_std[i];
4774                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4775                         << RXD_LEN_SHIFT;
4776                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4777                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4778                                (i << RXD_OPAQUE_INDEX_SHIFT));
4779         }
4780
4781         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4782                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4783                         struct tg3_rx_buffer_desc *rxd;
4784
4785                         rxd = &tp->rx_jumbo[i];
4786                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4787                                 << RXD_LEN_SHIFT;
4788                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4789                                 RXD_FLAG_JUMBO;
4790                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4791                                (i << RXD_OPAQUE_INDEX_SHIFT));
4792                 }
4793         }
4794
4795         /* Now allocate fresh SKBs for each rx ring. */
4796         for (i = 0; i < tp->rx_pending; i++) {
4797                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4798                         printk(KERN_WARNING PFX
4799                                "%s: Using a smaller RX standard ring, "
4800                                "only %d out of %d buffers were allocated "
4801                                "successfully.\n",
4802                                tp->dev->name, i, tp->rx_pending);
4803                         if (i == 0)
4804                                 return -ENOMEM;
4805                         tp->rx_pending = i;
4806                         break;
4807                 }
4808         }
4809
4810         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4811                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4812                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4813                                              -1, i) < 0) {
4814                                 printk(KERN_WARNING PFX
4815                                        "%s: Using a smaller RX jumbo ring, "
4816                                        "only %d out of %d buffers were "
4817                                        "allocated successfully.\n",
4818                                        tp->dev->name, i, tp->rx_jumbo_pending);
4819                                 if (i == 0) {
4820                                         tg3_free_rings(tp);
4821                                         return -ENOMEM;
4822                                 }
4823                                 tp->rx_jumbo_pending = i;
4824                                 break;
4825                         }
4826                 }
4827         }
4828         return 0;
4829 }
4830
4831 /*
4832  * Must not be invoked with interrupt sources disabled and
4833  * the hardware shut down.
4834  */
4835 static void tg3_free_consistent(struct tg3 *tp)
4836 {
4837         kfree(tp->rx_std_buffers);
4838         tp->rx_std_buffers = NULL;
4839         if (tp->rx_std) {
4840                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4841                                     tp->rx_std, tp->rx_std_mapping);
4842                 tp->rx_std = NULL;
4843         }
4844         if (tp->rx_jumbo) {
4845                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4846                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4847                 tp->rx_jumbo = NULL;
4848         }
4849         if (tp->rx_rcb) {
4850                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4851                                     tp->rx_rcb, tp->rx_rcb_mapping);
4852                 tp->rx_rcb = NULL;
4853         }
4854         if (tp->tx_ring) {
4855                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4856                         tp->tx_ring, tp->tx_desc_mapping);
4857                 tp->tx_ring = NULL;
4858         }
4859         if (tp->hw_status) {
4860                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4861                                     tp->hw_status, tp->status_mapping);
4862                 tp->hw_status = NULL;
4863         }
4864         if (tp->hw_stats) {
4865                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4866                                     tp->hw_stats, tp->stats_mapping);
4867                 tp->hw_stats = NULL;
4868         }
4869 }
4870
4871 /*
4872  * Must not be invoked with interrupt sources disabled and
4873  * the hardware shut down.  Can sleep.
4874  */
4875 static int tg3_alloc_consistent(struct tg3 *tp)
4876 {
4877         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4878                                       (TG3_RX_RING_SIZE +
4879                                        TG3_RX_JUMBO_RING_SIZE)) +
4880                                      (sizeof(struct tx_ring_info) *
4881                                       TG3_TX_RING_SIZE),
4882                                      GFP_KERNEL);
4883         if (!tp->rx_std_buffers)
4884                 return -ENOMEM;
4885
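        /* One kzalloc() backs three arrays, carved out back to back:
         *
         *	rx_std_buffers  : TG3_RX_RING_SIZE       x ring_info
         *	rx_jumbo_buffers: TG3_RX_JUMBO_RING_SIZE x ring_info
         *	tx_buffers      : TG3_TX_RING_SIZE       x tx_ring_info
         */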
4886         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4887         tp->tx_buffers = (struct tx_ring_info *)
4888                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4889
4890         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4891                                           &tp->rx_std_mapping);
4892         if (!tp->rx_std)
4893                 goto err_out;
4894
4895         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4896                                             &tp->rx_jumbo_mapping);
4897
4898         if (!tp->rx_jumbo)
4899                 goto err_out;
4900
4901         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4902                                           &tp->rx_rcb_mapping);
4903         if (!tp->rx_rcb)
4904                 goto err_out;
4905
4906         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4907                                            &tp->tx_desc_mapping);
4908         if (!tp->tx_ring)
4909                 goto err_out;
4910
4911         tp->hw_status = pci_alloc_consistent(tp->pdev,
4912                                              TG3_HW_STATUS_SIZE,
4913                                              &tp->status_mapping);
4914         if (!tp->hw_status)
4915                 goto err_out;
4916
4917         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4918                                             sizeof(struct tg3_hw_stats),
4919                                             &tp->stats_mapping);
4920         if (!tp->hw_stats)
4921                 goto err_out;
4922
4923         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4924         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4925
4926         return 0;
4927
4928 err_out:
4929         tg3_free_consistent(tp);
4930         return -ENOMEM;
4931 }
4932
4933 #define MAX_WAIT_CNT 1000
4934
4935 /* To stop a block, clear the enable bit and poll till it
4936  * clears.  tp->lock is held.
4937  */
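/* The poll loop gives a block up to MAX_WAIT_CNT * 100us = 100ms to
 * acknowledge the disable before reporting -ENODEV.
 */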
4938 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4939 {
4940         unsigned int i;
4941         u32 val;
4942
4943         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4944                 switch (ofs) {
4945                 case RCVLSC_MODE:
4946                 case DMAC_MODE:
4947                 case MBFREE_MODE:
4948                 case BUFMGR_MODE:
4949                 case MEMARB_MODE:
4950                         /* We can't enable/disable these bits of the
4951                          * 5705/5750, just say success.
4952                          */
4953                         return 0;
4954
4955                 default:
4956                         break;
4957                 }
4958         }
4959
4960         val = tr32(ofs);
4961         val &= ~enable_bit;
4962         tw32_f(ofs, val);
4963
4964         for (i = 0; i < MAX_WAIT_CNT; i++) {
4965                 udelay(100);
4966                 val = tr32(ofs);
4967                 if ((val & enable_bit) == 0)
4968                         break;
4969         }
4970
4971         if (i == MAX_WAIT_CNT && !silent) {
4972                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4973                        "ofs=%lx enable_bit=%x\n",
4974                        ofs, enable_bit);
4975                 return -ENODEV;
4976         }
4977
4978         return 0;
4979 }
4980
4981 /* tp->lock is held. */
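/* Blocks are quiesced roughly in packet-flow order: the receive path
 * first, then the send path and DMA engines, then host coalescing,
 * and finally the buffer manager and memory arbiter once the FTQs
 * have been reset.
 */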
4982 static int tg3_abort_hw(struct tg3 *tp, int silent)
4983 {
4984         int i, err;
4985
4986         tg3_disable_ints(tp);
4987
4988         tp->rx_mode &= ~RX_MODE_ENABLE;
4989         tw32_f(MAC_RX_MODE, tp->rx_mode);
4990         udelay(10);
4991
4992         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4993         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4994         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4995         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4996         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4997         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4998
4999         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5000         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5001         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5002         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5003         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5004         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5005         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5006
5007         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5008         tw32_f(MAC_MODE, tp->mac_mode);
5009         udelay(40);
5010
5011         tp->tx_mode &= ~TX_MODE_ENABLE;
5012         tw32_f(MAC_TX_MODE, tp->tx_mode);
5013
5014         for (i = 0; i < MAX_WAIT_CNT; i++) {
5015                 udelay(100);
5016                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5017                         break;
5018         }
5019         if (i >= MAX_WAIT_CNT) {
5020                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5021                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5022                        tp->dev->name, tr32(MAC_TX_MODE));
5023                 err |= -ENODEV;
5024         }
5025
5026         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5027         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5028         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5029
5030         tw32(FTQ_RESET, 0xffffffff);
5031         tw32(FTQ_RESET, 0x00000000);
5032
5033         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5034         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5035
5036         if (tp->hw_status)
5037                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5038         if (tp->hw_stats)
5039                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5040
5041         return err;
5042 }
5043
5044 /* tp->lock is held. */
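/* NVRAM access is guarded by the SWARB (software arbitration)
 * request/grant bits.  nvram_lock_cnt makes the lock recursive for
 * this driver; the grant bit is polled for up to 8000 * 20us = 160ms
 * before the request is withdrawn and -ENODEV returned.
 */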
5045 static int tg3_nvram_lock(struct tg3 *tp)
5046 {
5047         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5048                 int i;
5049
5050                 if (tp->nvram_lock_cnt == 0) {
5051                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5052                         for (i = 0; i < 8000; i++) {
5053                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5054                                         break;
5055                                 udelay(20);
5056                         }
5057                         if (i == 8000) {
5058                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5059                                 return -ENODEV;
5060                         }
5061                 }
5062                 tp->nvram_lock_cnt++;
5063         }
5064         return 0;
5065 }
5066
5067 /* tp->lock is held. */
5068 static void tg3_nvram_unlock(struct tg3 *tp)
5069 {
5070         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5071                 if (tp->nvram_lock_cnt > 0)
5072                         tp->nvram_lock_cnt--;
5073                 if (tp->nvram_lock_cnt == 0)
5074                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5075         }
5076 }
5077
5078 /* tp->lock is held. */
5079 static void tg3_enable_nvram_access(struct tg3 *tp)
5080 {
5081         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5082             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5083                 u32 nvaccess = tr32(NVRAM_ACCESS);
5084
5085                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5086         }
5087 }
5088
5089 /* tp->lock is held. */
5090 static void tg3_disable_nvram_access(struct tg3 *tp)
5091 {
5092         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5093             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5094                 u32 nvaccess = tr32(NVRAM_ACCESS);
5095
5096                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5097         }
5098 }
5099
5100 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5101 {
5102         int i;
5103         u32 apedata;
5104
5105         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5106         if (apedata != APE_SEG_SIG_MAGIC)
5107                 return;
5108
5109         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5110         if (apedata != APE_FW_STATUS_READY)
5111                 return;
5112
5113         /* Wait for up to 1 millisecond for APE to service previous event. */
5114         for (i = 0; i < 10; i++) {
5115                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5116                         return;
5117
5118                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5119
5120                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5121                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5122                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5123
5124                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5125
5126                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5127                         break;
5128
5129                 udelay(100);
5130         }
5131
5132         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5133                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5134 }
5135
5136 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5137 {
5138         u32 event;
5139         u32 apedata;
5140
5141         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5142                 return;
5143
5144         switch (kind) {
5145                 case RESET_KIND_INIT:
5146                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5147                                         APE_HOST_SEG_SIG_MAGIC);
5148                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5149                                         APE_HOST_SEG_LEN_MAGIC);
5150                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5151                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5152                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5153                                         APE_HOST_DRIVER_ID_MAGIC);
5154                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5155                                         APE_HOST_BEHAV_NO_PHYLOCK);
5156
5157                         event = APE_EVENT_STATUS_STATE_START;
5158                         break;
5159                 case RESET_KIND_SHUTDOWN:
5160                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5161                         break;
5162                 case RESET_KIND_SUSPEND:
5163                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5164                         break;
5165                 default:
5166                         return;
5167         }
5168
5169         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5170
5171         tg3_ape_send_event(tp, event);
5172 }
5173
5174 /* tp->lock is held. */
5175 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5176 {
5177         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5178                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5179
5180         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5181                 switch (kind) {
5182                 case RESET_KIND_INIT:
5183                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5184                                       DRV_STATE_START);
5185                         break;
5186
5187                 case RESET_KIND_SHUTDOWN:
5188                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5189                                       DRV_STATE_UNLOAD);
5190                         break;
5191
5192                 case RESET_KIND_SUSPEND:
5193                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5194                                       DRV_STATE_SUSPEND);
5195                         break;
5196
5197                 default:
5198                         break;
5199         }
5200         }
5201
5202         if (kind == RESET_KIND_INIT ||
5203             kind == RESET_KIND_SUSPEND)
5204                 tg3_ape_driver_state_change(tp, kind);
5205 }
5206
5207 /* tp->lock is held. */
5208 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5209 {
5210         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5211                 switch (kind) {
5212                 case RESET_KIND_INIT:
5213                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5214                                       DRV_STATE_START_DONE);
5215                         break;
5216
5217                 case RESET_KIND_SHUTDOWN:
5218                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5219                                       DRV_STATE_UNLOAD_DONE);
5220                         break;
5221
5222                 default:
5223                         break;
5224         }
5225         }
5226
5227         if (kind == RESET_KIND_SHUTDOWN)
5228                 tg3_ape_driver_state_change(tp, kind);
5229 }
5230
5231 /* tp->lock is held. */
5232 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5233 {
5234         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5235                 switch (kind) {
5236                 case RESET_KIND_INIT:
5237                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5238                                       DRV_STATE_START);
5239                         break;
5240
5241                 case RESET_KIND_SHUTDOWN:
5242                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5243                                       DRV_STATE_UNLOAD);
5244                         break;
5245
5246                 case RESET_KIND_SUSPEND:
5247                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5248                                       DRV_STATE_SUSPEND);
5249                         break;
5250
5251                 default:
5252                         break;
5253         }
5254         }
5255 }
5256
5257 static int tg3_poll_fw(struct tg3 *tp)
5258 {
5259         int i;
5260         u32 val;
5261
5262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5263                 /* Wait up to 20ms for init done. */
5264                 for (i = 0; i < 200; i++) {
5265                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5266                                 return 0;
5267                         udelay(100);
5268                 }
5269                 return -ENODEV;
5270         }
5271
5272         /* Wait for firmware initialization to complete. */
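        /* tg3_write_sig_pre_reset() stored MAGIC1 in this mailbox before
         * the reset; the firmware writes back its one's complement once
         * initialization is done, which is what the loop waits for.
         */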
5273         for (i = 0; i < 100000; i++) {
5274                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5275                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5276                         break;
5277                 udelay(10);
5278         }
5279
5280         /* Chip might not be fitted with firmware.  Some Sun onboard
5281          * parts are configured like that.  So don't signal the timeout
5282          * of the above loop as an error, but do report the lack of
5283          * running firmware once.
5284          */
5285         if (i >= 100000 &&
5286             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5287                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5288
5289                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5290                        tp->dev->name);
5291         }
5292
5293         return 0;
5294 }
5295
5296 /* Save PCI command register before chip reset */
5297 static void tg3_save_pci_state(struct tg3 *tp)
5298 {
5299         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5300 }
5301
5302 /* Restore PCI state after chip reset */
5303 static void tg3_restore_pci_state(struct tg3 *tp)
5304 {
5305         u32 val;
5306
5307         /* Re-enable indirect register accesses. */
5308         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5309                                tp->misc_host_ctrl);
5310
5311         /* Set MAX PCI retry to zero. */
5312         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5313         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5314             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5315                 val |= PCISTATE_RETRY_SAME_DMA;
5316         /* Allow reads and writes to the APE register and memory space. */
5317         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5318                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5319                        PCISTATE_ALLOW_APE_SHMEM_WR;
5320         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5321
5322         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5323
5324         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5325                 pcie_set_readrq(tp->pdev, 4096);
5326         else {
5327                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5328                                       tp->pci_cacheline_sz);
5329                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5330                                       tp->pci_lat_timer);
5331         }
5332
5333         /* Make sure PCI-X relaxed ordering bit is clear. */
5334         if (tp->pcix_cap) {
5335                 u16 pcix_cmd;
5336
5337                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5338                                      &pcix_cmd);
5339                 pcix_cmd &= ~PCI_X_CMD_ERO;
5340                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5341                                       pcix_cmd);
5342         }
5343
5344         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5345
5346                 /* Chip reset on 5780 will reset MSI enable bit,
5347                  * so we need to restore it.
5348                  */
5349                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5350                         u16 ctrl;
5351
5352                         pci_read_config_word(tp->pdev,
5353                                              tp->msi_cap + PCI_MSI_FLAGS,
5354                                              &ctrl);
5355                         pci_write_config_word(tp->pdev,
5356                                               tp->msi_cap + PCI_MSI_FLAGS,
5357                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5358                         val = tr32(MSGINT_MODE);
5359                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5360                 }
5361         }
5362 }
5363
5364 static void tg3_stop_fw(struct tg3 *);
5365
5366 /* tp->lock is held. */
5367 static int tg3_chip_reset(struct tg3 *tp)
5368 {
5369         u32 val;
5370         void (*write_op)(struct tg3 *, u32, u32);
5371         int err;
5372
5373         tg3_nvram_lock(tp);
5374
5375         /* No matching tg3_nvram_unlock() after this because
5376          * chip reset below will undo the nvram lock.
5377          */
5378         tp->nvram_lock_cnt = 0;
5379
5380         /* GRC_MISC_CFG core clock reset will clear the memory
5381          * enable bit in PCI register 4 and the MSI enable bit
5382          * on some chips, so we save relevant registers here.
5383          */
5384         tg3_save_pci_state(tp);
5385
5386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5389             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5391                 tw32(GRC_FASTBOOT_PC, 0);
5392
5393         /*
5394          * We must avoid the readl() that normally takes place.
5395          * It locks machines, causes machine checks, and other
5396          * fun things.  So, temporarily disable the 5701
5397          * hardware workaround, while we do the reset.
5398          */
5399         write_op = tp->write32;
5400         if (write_op == tg3_write_flush_reg32)
5401                 tp->write32 = tg3_write32;
5402
5403         /* Prevent the irq handler from reading or writing PCI registers
5404          * during chip reset when the memory enable bit in the PCI command
5405          * register may be cleared.  The chip does not generate interrupt
5406          * at this time, but the irq handler may still be called due to irq
5407          * sharing or irqpoll.
5408          */
5409         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5410         if (tp->hw_status) {
5411                 tp->hw_status->status = 0;
5412                 tp->hw_status->status_tag = 0;
5413         }
5414         tp->last_tag = 0;
5415         smp_mb();
5416         synchronize_irq(tp->pdev->irq);
5417
5418         /* do the reset */
5419         val = GRC_MISC_CFG_CORECLK_RESET;
5420
5421         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5422                 if (tr32(0x7e2c) == 0x60) {
5423                         tw32(0x7e2c, 0x20);
5424                 }
5425                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5426                         tw32(GRC_MISC_CFG, (1 << 29));
5427                         val |= (1 << 29);
5428                 }
5429         }
5430
5431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5432                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5433                 tw32(GRC_VCPU_EXT_CTRL,
5434                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5435         }
5436
5437         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5438                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5439         tw32(GRC_MISC_CFG, val);
5440
5441         /* restore 5701 hardware bug workaround write method */
5442         tp->write32 = write_op;
5443
5444         /* Unfortunately, we have to delay before the PCI read back.
5445          * Some 575X chips even will not respond to a PCI cfg access
5446          * when the reset command is given to the chip.
5447          *
5448          * How do these hardware designers expect things to work
5449          * properly if the PCI write is posted for a long period
5450          * of time?  It is always necessary to have some method by
5451          * which a register read back can occur to push the write
5452          * out which does the reset.
5453          *
5454          * For most tg3 variants the trick below was working.
5455          * Ho hum...
5456          */
5457         udelay(120);
5458
5459         /* Flush PCI posted writes.  The normal MMIO registers
5460          * are inaccessible at this time so this is the only
5461  * way to do this reliably (actually, this is no longer
5462          * the case, see above).  I tried to use indirect
5463          * register read/write but this upset some 5701 variants.
5464          */
5465         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5466
5467         udelay(120);
5468
5469         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5470                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5471                         int i;
5472                         u32 cfg_val;
5473
5474                         /* Wait for link training to complete.  */
5475                         for (i = 0; i < 5000; i++)
5476                                 udelay(100);
5477
5478                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5479                         pci_write_config_dword(tp->pdev, 0xc4,
5480                                                cfg_val | (1 << 15));
5481                 }
5482                 /* Set PCIE max payload size and clear error status.  */
5483                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5484         }
5485
5486         tg3_restore_pci_state(tp);
5487
5488         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5489
5490         val = 0;
5491         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5492                 val = tr32(MEMARB_MODE);
5493         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5494
5495         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5496                 tg3_stop_fw(tp);
5497                 tw32(0x5000, 0x400);
5498         }
5499
5500         tw32(GRC_MODE, tp->grc_mode);
5501
5502         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5503                 val = tr32(0xc4);
5504
5505                 tw32(0xc4, val | (1 << 15));
5506         }
5507
5508         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5509             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5510                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5511                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5512                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5513                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5514         }
5515
5516         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5517                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5518                 tw32_f(MAC_MODE, tp->mac_mode);
5519         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5520                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5521                 tw32_f(MAC_MODE, tp->mac_mode);
5522         } else
5523                 tw32_f(MAC_MODE, 0);
5524         udelay(40);
5525
5526         err = tg3_poll_fw(tp);
5527         if (err)
5528                 return err;
5529
5530         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5531             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5532                 val = tr32(0x7c00);
5533
5534                 tw32(0x7c00, val | (1 << 25));
5535         }
5536
	/* Reprobe ASF enable state.  Only trust the NIC SRAM config
	 * word if the SRAM signature is valid.
	 */
5538         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5539         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5540         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5541         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5542                 u32 nic_cfg;
5543
5544                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5545                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5546                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5547                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5548                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5549                 }
5550         }
5551
5552         return 0;
5553 }
5554
5555 /* tp->lock is held. */
5556 static void tg3_stop_fw(struct tg3 *tp)
5557 {
5558         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5559            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5560                 u32 val;
5561
5562                 /* Wait for RX cpu to ACK the previous event. */
5563                 tg3_wait_for_event_ack(tp);
5564
5565                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5566                 val = tr32(GRC_RX_CPU_EVENT);
5567                 val |= GRC_RX_CPU_DRIVER_EVENT;
5568                 tw32(GRC_RX_CPU_EVENT, val);
5569
5570                 /* Wait for RX cpu to ACK this event. */
5571                 tg3_wait_for_event_ack(tp);
5572         }
5573 }
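/* The body of tg3_stop_fw() is one instance of the driver->firmware
 * mailbox handshake used throughout this file: wait for the previous
 * doorbell to be ACKed, post the command word in shared SRAM, ring the
 * doorbell, then wait for the new ACK.  A hedged sketch of the generic
 * shape (hypothetical helper name, not part of the driver):
 */
#if 0
static void tg3_fw_cmd(struct tg3 *tp, u32 cmd)
{
	tg3_wait_for_event_ack(tp);			/* prior event consumed */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);	/* post the command */
	tw32(GRC_RX_CPU_EVENT,				/* ring the doorbell */
	     tr32(GRC_RX_CPU_EVENT) | GRC_RX_CPU_DRIVER_EVENT);
	tg3_wait_for_event_ack(tp);			/* firmware ACK */
}
#endif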
5574
5575 /* tp->lock is held. */
5576 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5577 {
5578         int err;
5579
5580         tg3_stop_fw(tp);
5581
5582         tg3_write_sig_pre_reset(tp, kind);
5583
5584         tg3_abort_hw(tp, silent);
5585         err = tg3_chip_reset(tp);
5586
5587         tg3_write_sig_legacy(tp, kind);
5588         tg3_write_sig_post_reset(tp, kind);
5589
5590         if (err)
5591                 return err;
5592
5593         return 0;
5594 }
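/* Note: the signature writes bracketing tg3_chip_reset() above are,
 * apparently, how the driver communicates the reset (and its kind) to
 * the on-chip bootcode/ASF firmware; see tg3_write_sig_pre_reset() and
 * tg3_write_sig_post_reset().
 */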
5595
5596 #define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELEASE_MINOR	0x0
5598 #define TG3_FW_RELEASE_FIX      0x0
5599 #define TG3_FW_START_ADDR       0x08000000
5600 #define TG3_FW_TEXT_ADDR        0x08000000
5601 #define TG3_FW_TEXT_LEN         0x9c0
5602 #define TG3_FW_RODATA_ADDR      0x080009c0
5603 #define TG3_FW_RODATA_LEN       0x60
5604 #define TG3_FW_DATA_ADDR        0x08000a40
5605 #define TG3_FW_DATA_LEN         0x20
5606 #define TG3_FW_SBSS_ADDR        0x08000a60
5607 #define TG3_FW_SBSS_LEN         0xc
5608 #define TG3_FW_BSS_ADDR         0x08000a70
5609 #define TG3_FW_BSS_LEN          0x10
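/* The section layout above is contiguous in the firmware CPU's address
 * space, modulo small alignment pads:
 *   TEXT   0x08000000 + 0x9c0 = 0x080009c0 = RODATA_ADDR
 *   RODATA 0x080009c0 + 0x60  = 0x08000a20 (padded up to DATA at 0x08000a40)
 *   DATA   0x08000a40 + 0x20  = 0x08000a60 = SBSS_ADDR
 *   SBSS   0x08000a60 + 0xc   = 0x08000a6c (padded up to BSS at 0x08000a70)
 * Each image below is stored as u32 words, hence the LEN / sizeof(u32)
 * sizing, with one extra word of slack.
 */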
5610
5611 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5612         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5613         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5614         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5615         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5616         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5617         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5618         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5619         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5620         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5621         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5622         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5623         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5624         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5625         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5626         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5627         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5628         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5629         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5630         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5631         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5632         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5633         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5634         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5635         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5636         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5637         0, 0, 0, 0, 0, 0,
5638         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5639         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5640         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5641         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5642         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5643         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5644         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5645         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5646         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5647         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5648         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5649         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5650         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5651         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5652         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5653         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5654         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5655         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5656         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5657         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5658         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5659         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5660         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5661         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5662         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5663         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5664         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5665         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5666         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5667         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5668         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5669         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5670         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5671         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5672         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5673         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5674         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5675         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5676         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5677         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5678         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5679         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5680         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5681         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5682         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5683         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5684         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5685         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5686         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5687         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5688         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5689         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5690         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5691         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5692         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5693         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5694         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5695         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5696         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5697         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5698         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5699         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5700         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5701         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5702         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5703 };
5704
5705 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5706         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5707         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5708         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5709         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5710         0x00000000
5711 };
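/* The rodata words above are ASCII message tags, read as big-endian
 * bytes: e.g. 0x53774576 0x656e7430 is "SwEvent0", 0x66617461 0x6c457272
 * is "fatalErr", and 0x4d61696e 0x43707542 is "MainCpuB".
 */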
5712
5713 #if 0 /* All zeros, don't eat up space with it. */
5714 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5715         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5716         0x00000000, 0x00000000, 0x00000000, 0x00000000
5717 };
5718 #endif
5719
5720 #define RX_CPU_SCRATCH_BASE     0x30000
5721 #define RX_CPU_SCRATCH_SIZE     0x04000
5722 #define TX_CPU_SCRATCH_BASE     0x34000
5723 #define TX_CPU_SCRATCH_SIZE     0x04000
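/* Each embedded CPU gets a 16 KiB (0x4000-byte) scratch window, laid
 * out back to back: RX at 0x30000-0x33fff, TX at 0x34000-0x37fff.
 */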
5724
5725 /* tp->lock is held. */
5726 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5727 {
5728         int i;
5729
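	/* 5705 and later chips have a single embedded RISC CPU, so there
	 * is no separate TX cpu to halt; hence the BUG_ON below.
	 */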
5730         BUG_ON(offset == TX_CPU_BASE &&
5731             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5732
5733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5734                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5735
5736                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5737                 return 0;
5738         }
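	/* Halting the RX cpu does not always take on the first attempt,
	 * so keep re-issuing the STATE/MODE writes until the HALT bit
	 * reads back set.
	 */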
5739         if (offset == RX_CPU_BASE) {
5740                 for (i = 0; i < 10000; i++) {
5741                         tw32(offset + CPU_STATE, 0xffffffff);
5742                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5743                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5744                                 break;
5745                 }
5746
5747                 tw32(offset + CPU_STATE, 0xffffffff);