[TG3]: Correct sw autoneg flow control advertisements
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.86"
68 #define DRV_MODULE_RELDATE      "November 9, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
135 static char version[] __devinitdata =
136         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
142
143 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147 static struct pci_device_id tg3_pci_tbl[] = {
148         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213         {}
214 };
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
218 static const struct {
219         const char string[ETH_GSTRING_LEN];
220 } ethtool_stats_keys[TG3_NUM_STATS] = {
221         { "rx_octets" },
222         { "rx_fragments" },
223         { "rx_ucast_packets" },
224         { "rx_mcast_packets" },
225         { "rx_bcast_packets" },
226         { "rx_fcs_errors" },
227         { "rx_align_errors" },
228         { "rx_xon_pause_rcvd" },
229         { "rx_xoff_pause_rcvd" },
230         { "rx_mac_ctrl_rcvd" },
231         { "rx_xoff_entered" },
232         { "rx_frame_too_long_errors" },
233         { "rx_jabbers" },
234         { "rx_undersize_packets" },
235         { "rx_in_length_errors" },
236         { "rx_out_length_errors" },
237         { "rx_64_or_less_octet_packets" },
238         { "rx_65_to_127_octet_packets" },
239         { "rx_128_to_255_octet_packets" },
240         { "rx_256_to_511_octet_packets" },
241         { "rx_512_to_1023_octet_packets" },
242         { "rx_1024_to_1522_octet_packets" },
243         { "rx_1523_to_2047_octet_packets" },
244         { "rx_2048_to_4095_octet_packets" },
245         { "rx_4096_to_8191_octet_packets" },
246         { "rx_8192_to_9022_octet_packets" },
247
248         { "tx_octets" },
249         { "tx_collisions" },
250
251         { "tx_xon_sent" },
252         { "tx_xoff_sent" },
253         { "tx_flow_control" },
254         { "tx_mac_errors" },
255         { "tx_single_collisions" },
256         { "tx_mult_collisions" },
257         { "tx_deferred" },
258         { "tx_excessive_collisions" },
259         { "tx_late_collisions" },
260         { "tx_collide_2times" },
261         { "tx_collide_3times" },
262         { "tx_collide_4times" },
263         { "tx_collide_5times" },
264         { "tx_collide_6times" },
265         { "tx_collide_7times" },
266         { "tx_collide_8times" },
267         { "tx_collide_9times" },
268         { "tx_collide_10times" },
269         { "tx_collide_11times" },
270         { "tx_collide_12times" },
271         { "tx_collide_13times" },
272         { "tx_collide_14times" },
273         { "tx_collide_15times" },
274         { "tx_ucast_packets" },
275         { "tx_mcast_packets" },
276         { "tx_bcast_packets" },
277         { "tx_carrier_sense_errors" },
278         { "tx_discards" },
279         { "tx_errors" },
280
281         { "dma_writeq_full" },
282         { "dma_write_prioq_full" },
283         { "rxbds_empty" },
284         { "rx_discards" },
285         { "rx_errors" },
286         { "rx_threshold_hit" },
287
288         { "dma_readq_full" },
289         { "dma_read_prioq_full" },
290         { "tx_comp_queue_full" },
291
292         { "ring_set_send_prod_index" },
293         { "ring_status_update" },
294         { "nic_irqs" },
295         { "nic_avoided_irqs" },
296         { "nic_tx_threshold_hit" }
297 };
298
299 static const struct {
300         const char string[ETH_GSTRING_LEN];
301 } ethtool_test_keys[TG3_NUM_TEST] = {
302         { "nvram test     (online) " },
303         { "link test      (online) " },
304         { "register test  (offline)" },
305         { "memory test    (offline)" },
306         { "loopback test  (offline)" },
307         { "interrupt test (offline)" },
308 };
309
/* Plain MMIO write of @val to the register at offset @off.  No
 * read-back flush is performed (contrast tg3_write_flush_reg32).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
/* MMIO write of @val into the APE register block at offset @off. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Indirect register write through PCI config space: the target offset
 * goes into TG3PCI_REG_BASE_ADDR and the data into TG3PCI_REG_DATA.
 * indirect_lock serializes the two-cycle address/data sequence against
 * concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO write of @val to @off, immediately followed by a read-back of
 * the same register to flush the write to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Indirect register read through PCI config space: program the target
 * offset into TG3PCI_REG_BASE_ADDR, then read TG3PCI_REG_DATA.  The
 * address/data pair is protected by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Indirect-mode mailbox write.  The RX return-ring consumer and RX
 * standard-ring producer mailboxes have dedicated PCI config-space
 * aliases and are written directly; everything else goes through the
 * locked address/data pair, with the mailbox window based at
 * off + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Indirect-mode mailbox read via the locked PCI config address/data
 * pair; the mailbox window is based at off + 0x5600 (matching
 * tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
/* Register write with flush and an optional @usec_wait delay (see the
 * block comment above for why some registers must not be read back
 * immediately).  The PCIX-target-hwbug and ICH-workaround chips use
 * the non-posted indirect write path instead of MMIO + read-back.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Mailbox write followed by a flushing read-back, unless the
 * MBOX_WRITE_REORDER or ICH-workaround flag is set, in which case the
 * read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* TX mailbox write.  The value is written twice when the TXD-mailbox
 * hardware bug flag is set, and read back when the chipset may reorder
 * mailbox writes.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
/* 5906 mailbox write: mailboxes are offset by GRCMBOX_BASE within the
 * register window on this chip.
 */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
453 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
454 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
455 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
456 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
457 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
458
459 #define tw32(reg,val)           tp->write32(tp, reg, val)
460 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
461 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
462 #define tr32(reg)               tp->read32(tp, reg)
463
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window (base-address register + data register).  On 5906, writes
 * into the stats-block region are silently dropped.  PCI config
 * cycles are used when SRAM_USE_CONFIG is set, flushed MMIO writes
 * otherwise; the window base is restored to zero in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read NIC on-chip SRAM at offset @off into *@val through the memory
 * window.  On 5906, reads from the stats-block region return 0.
 * Mirrors tg3_write_mem: PCI config cycles when SRAM_USE_CONFIG is
 * set, MMIO otherwise, window base restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
/* Reset all eight APE lock GRANT registers to the driver grant value
 * so no stale locks are held when the driver starts.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}
525
/* Acquire APE lock @locknum for the driver.  Only TG3_APE_LOCK_MEM is
 * supported.  Posts a request in the lock's REQ register and polls the
 * GRANT register for up to ~1 ms (100 x 10 us).  On timeout the
 * request is revoked and -EBUSY returned.
 *
 * Returns 0 on success (or when the APE is not enabled), -EINVAL for
 * an unsupported lock number, -EBUSY on timeout.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	/* Each lock owns a 4-byte REQ/GRANT register slot. */
	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI interrupt in misc host control,
 * then write 1 to the interrupt mailbox (the indirect-mbox path also
 * clears the GRC interrupt bit for this value — see
 * tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Conditionally force an interrupt: when not using tagged status and
 * the status block shows a pending update, assert SETINT via GRC local
 * control; otherwise kick the coalescing engine with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts.  irq_sync is cleared first and ordered
 * before the register writes by the wmb().  The PCI interrupt is
 * unmasked and the last processed tag is written to the interrupt
 * mailbox (twice when 1-shot MSI is active).  Finally force an
 * interrupt in case work arrived while interrupts were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
635 /* tg3_restart_ints
636  *  similar to tg3_enable_ints, but it accurately determines whether there
637  *  is new work pending and can return without flushing the PIO write
638  *  which reenables interrupts
639  */
/* Re-enable interrupts after servicing, without flushing the mailbox
 * write (tw32_mailbox, not the _f variant); mmiowb() orders the write
 * before any subsequent I/O from other CPUs releasing a lock.
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the interface: refresh trans_start so the TX watchdog does
 * not fire while the queue is stopped, then disable NAPI polling and
 * the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the interface after tg3_netif_stop: wake the TX queue,
 * re-enable NAPI, mark the status block updated, and re-enable
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low 5 bits.  No-op on CPMU-equipped or 5780-class parts.  5705+
 * parts re-assert the 625 MHz core bit first if it was set; older
 * parts step the 44 MHz core clock down via an intermediate ALTCLK
 * sequence.  Each write uses a 40 us wait because the register cannot
 * be safely read back immediately (see the _tw32_flush comment).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707 #define PHY_BUSY_LOOPS  5000
708
/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * MI auto-polling is temporarily disabled around the transaction and
 * restored afterwards; each MI_MODE change is followed by an 80 us
 * settle delay.  The completion poll busy-waits up to PHY_BUSY_LOOPS
 * iterations for MI_COM_BUSY to clear.
 *
 * Returns 0 on success, -EBUSY on timeout (*val is left 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to pick up the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * ignored (return 0).  Auto-polling handling and the busy-wait loop
 * mirror tg3_readphy.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Enable or disable automatic MDI crossover on the PHY.  Only applies
 * to 5705+ copper devices (serdes PHYs are skipped).  5906 uses the
 * EPHY shadow register set (SHADOW_EN gates access, MISCCTRL_MDIX is
 * the toggle); other PHYs use the AUXCTL misc shadow register with
 * FORCE_AMDIX, setting WREN to commit the write.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			/* Open the shadow window, flip the MDIX bit,
			 * then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write a known test pattern into the four PHY DSP channels, read it
 * back through the macro interface and verify it.  Part of the
 * 5703/4/5 PHY reset workaround.
 *
 * Returns 0 when every channel verifies.  On a macro-done timeout or
 * a readback error, *resetp is set so the caller resets the PHY and
 * retries; a plain data miscompare returns -EBUSY without setting
 * *resetp.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Per-channel {low,high} word pairs: three 15-bit/4-bit values. */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's DSP block and enter write mode. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Kick off the write macro and wait for completion. */
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel and switch to readback mode. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back low/high word pairs and compare after
                 * masking to the architected 15-bit/4-bit widths.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Miscompare: restore DSP register 0x000b
                                 * and bail without requesting another
                                 * full PHY reset.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
/* Workaround for flaky gigabit PHYs on 5703/5704/5705: reset the PHY,
 * force 1000/full master mode, run a DSP test-pattern write/verify
 * cycle (retrying with fresh resets on failure), then restore the
 * original register state.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                /* NOTE(review): if this read fails on every retry,
                 * phy9_orig is used uninitialized in the restore write
                 * below -- confirm whether that path can happen.
                 */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                /* Verify the DSP; on failure the helper may request
                 * another PHY reset via do_phy_reset.
                 */
                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear the test pattern back out of the DSP channels. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the master/slave setting saved above. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt. */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Fully reset the tigon3 PHY and re-apply all chip-specific PHY
 * fixups (ADC/BER/jitter DSP workarounds, jumbo-frame bits, auto-MDIX
 * and wirespeed).  Reports link-down first if the carrier was up.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                /* Bring the 10/100 EPHY out of IDDQ low-power mode. */
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR is read twice: its link-status bit is latched-low. */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* These chips need the DSP test-pattern workaround
                 * instead of a plain BMCR reset.
                 */
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                /* Undo the 12.5MHz MAC clock forced at power-down. */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

out:
        /* Chip-specific DSP fixups below use opaque register/value
         * pairs supplied by the vendor.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* Register 0x1c is deliberately written twice. */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1195
/* Configure the GPIO-controlled auxiliary (Vaux) power source.
 *
 * If either this port or its peer (on dual-port 5704/5714 boards)
 * needs power for WOL or ASF, drive the GPIOs to keep aux power up;
 * otherwise pulse them to switch it off.  Only meaningful on real
 * NICs (TG3_FLG2_IS_NIC), not LOM designs.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* If the peer port already finished init, its
                         * GPIO settings are authoritative.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three-stage GPIO sequence to bring aux power up. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        /* Leave the GPIOs to the peer if it is active. */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* GPIO1 pulse sequence to switch aux power off. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1291
1292 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1293 {
1294         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1295                 return 1;
1296         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1297                 if (speed != SPEED_10)
1298                         return 1;
1299         } else if (speed == SPEED_10)
1300                 return 1;
1301
1302         return 0;
1303 }
1304
1305 static int tg3_setup_phy(struct tg3 *, int);
1306
1307 #define RESET_KIND_SHUTDOWN     0
1308 #define RESET_KIND_INIT         1
1309 #define RESET_KIND_SUSPEND      2
1310
1311 static void tg3_write_sig_post_reset(struct tg3 *, int);
1312 static int tg3_halt_cpu(struct tg3 *, u32);
1313 static int tg3_nvram_lock(struct tg3 *);
1314 static void tg3_nvram_unlock(struct tg3 *);
1315
/* Put the PHY into its lowest safe power state before suspend.
 * SerDes, 5906 EPHY and copper PHYs each require a different
 * sequence, and some chips must not get BMCR_PDOWN at all because
 * of hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        /* Hold the SerDes digital block in soft reset. */
                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Reset the EPHY, then drop it into IDDQ low-power mode. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else {
                /* Copper PHY: force the LEDs off before powering down. */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                /* Force the MAC clock down to 12.5MHz while powered down;
                 * tg3_phy_reset() undoes this on resume.
                 */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1363
/* Transition the device to the given PCI power state.
 *
 * For D0 this just writes PMCSR and switches out of Vaux.  For
 * D1/D2/D3hot it saves the current link configuration, renegotiates
 * copper links down to 10/half, arms WOL if configured, gates the
 * chip clocks, powers the PHY down when nothing needs it, frobs the
 * aux-power GPIOs and finally writes the new state to PMCSR.
 * Returns 0 on success or -EINVAL for an unknown state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;    /* config-space offset of the PM capability */

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        /* Clear any pending PME and the current power-state bits. */
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case PCI_D0:
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is a NIC */
                if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
                power_control |= 1;
                break;

        case PCI_D2:
                power_control |= 2;
                break;

        case PCI_D3hot:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        /* Mask PCI interrupts while we reconfigure for low power. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the active link configuration so resume can restore it. */
        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        /* Copper: renegotiate down to 10/half to save power. */
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                /* Wait (up to ~200ms) for firmware to signal readiness. */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                /* Keep the MAC/PHY alive enough to receive the magic
                 * packet: pick the port mode and link polarity.
                 */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = (tp->tg3_flags &
                                             TG3_FLAG_WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate chip clocks as aggressively as this chip allows. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Clock bits are applied in two (or three) steps. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Power the PHY down only if neither WOL, ASF nor APE needs it. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                tg3_power_down_phy(tp);

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        return 0;
}
1588
1589 static void tg3_link_report(struct tg3 *tp)
1590 {
1591         if (!netif_carrier_ok(tp->dev)) {
1592                 if (netif_msg_link(tp))
1593                         printk(KERN_INFO PFX "%s: Link is down.\n",
1594                                tp->dev->name);
1595         } else if (netif_msg_link(tp)) {
1596                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597                        tp->dev->name,
1598                        (tp->link_config.active_speed == SPEED_1000 ?
1599                         1000 :
1600                         (tp->link_config.active_speed == SPEED_100 ?
1601                          100 : 10)),
1602                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1603                         "full" : "half"));
1604
1605                 printk(KERN_INFO PFX
1606                        "%s: Flow control is %s for TX and %s for RX.\n",
1607                        tp->dev->name,
1608                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609                        "on" : "off",
1610                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611                        "on" : "off");
1612         }
1613 }
1614
1615 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1616 {
1617         u16 miireg;
1618
1619         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1620                 miireg = ADVERTISE_PAUSE_CAP;
1621         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1622                 miireg = ADVERTISE_PAUSE_ASYM;
1623         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1624                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1625         else
1626                 miireg = 0;
1627
1628         return miireg;
1629 }
1630
1631 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1632 {
1633         u16 miireg;
1634
1635         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1636                 miireg = ADVERTISE_1000XPAUSE;
1637         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1638                 miireg = ADVERTISE_1000XPSE_ASYM;
1639         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1640                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1641         else
1642                 miireg = 0;
1643
1644         return miireg;
1645 }
1646
1647 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1648 {
1649         u8 cap = 0;
1650
1651         if (lcladv & ADVERTISE_PAUSE_CAP) {
1652                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1653                         if (rmtadv & LPA_PAUSE_CAP)
1654                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1655                         else if (rmtadv & LPA_PAUSE_ASYM)
1656                                 cap = TG3_FLOW_CTRL_RX;
1657                 } else {
1658                         if (rmtadv & LPA_PAUSE_CAP)
1659                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1660                 }
1661         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1662                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1663                         cap = TG3_FLOW_CTRL_TX;
1664         }
1665
1666         return cap;
1667 }
1668
1669 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1670 {
1671         u8 cap = 0;
1672
1673         if (lcladv & ADVERTISE_1000XPAUSE) {
1674                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1675                         if (rmtadv & LPA_1000XPAUSE)
1676                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1677                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1678                                 cap = TG3_FLOW_CTRL_RX;
1679                 } else {
1680                         if (rmtadv & LPA_1000XPAUSE)
1681                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1682                 }
1683         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1684                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1685                         cap = TG3_FLOW_CTRL_TX;
1686         }
1687
1688         return cap;
1689 }
1690
1691 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1692 {
1693         u8 new_tg3_flags = 0;
1694         u32 old_rx_mode = tp->rx_mode;
1695         u32 old_tx_mode = tp->tx_mode;
1696
1697         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1698                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1699                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1700                                                                    remote_adv);
1701                 else
1702                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1703                                                                    remote_adv);
1704         } else {
1705                 new_tg3_flags = tp->link_config.flowctrl;
1706         }
1707
1708         tp->link_config.active_flowctrl = new_tg3_flags;
1709
1710         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1711                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1712         else
1713                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1714
1715         if (old_rx_mode != tp->rx_mode) {
1716                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1717         }
1718
1719         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1720                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1721         else
1722                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1723
1724         if (old_tx_mode != tp->tx_mode) {
1725                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1726         }
1727 }
1728
1729 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1730 {
1731         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1732         case MII_TG3_AUX_STAT_10HALF:
1733                 *speed = SPEED_10;
1734                 *duplex = DUPLEX_HALF;
1735                 break;
1736
1737         case MII_TG3_AUX_STAT_10FULL:
1738                 *speed = SPEED_10;
1739                 *duplex = DUPLEX_FULL;
1740                 break;
1741
1742         case MII_TG3_AUX_STAT_100HALF:
1743                 *speed = SPEED_100;
1744                 *duplex = DUPLEX_HALF;
1745                 break;
1746
1747         case MII_TG3_AUX_STAT_100FULL:
1748                 *speed = SPEED_100;
1749                 *duplex = DUPLEX_FULL;
1750                 break;
1751
1752         case MII_TG3_AUX_STAT_1000HALF:
1753                 *speed = SPEED_1000;
1754                 *duplex = DUPLEX_HALF;
1755                 break;
1756
1757         case MII_TG3_AUX_STAT_1000FULL:
1758                 *speed = SPEED_1000;
1759                 *duplex = DUPLEX_FULL;
1760                 break;
1761
1762         default:
1763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1764                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1765                                  SPEED_10;
1766                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1767                                   DUPLEX_HALF;
1768                         break;
1769                 }
1770                 *speed = SPEED_INVALID;
1771                 *duplex = DUPLEX_INVALID;
1772                 break;
1773         };
1774 }
1775
/* Program the copper PHY advertisement and BMCR registers according to
 * tp->link_config, then either force the configured speed/duplex
 * (autoneg disabled) or (re)start autonegotiation.  Three advertisement
 * cases: low-power WOL mode, full autoneg (speed == SPEED_INVALID),
 * and a user-forced link mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100 Mbps available if WOL is allowed at 100. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise whatever link_config allows. */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		/* Fold in the configured pause advertisement bits. */
		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 need master mode forced at gigabit
			 * (chip erratum workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* 10/100 forced: clear the gigabit control reg. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms)
			 * for link to drop before writing the new mode,
			 * so the partner sees a clean renegotiation.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice for the
				 * current link state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1913
1914 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1915 {
1916         int err;
1917
1918         /* Turn off tap power management. */
1919         /* Set Extended packet length bit */
1920         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1921
1922         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1923         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1924
1925         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1926         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1927
1928         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1929         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1930
1931         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1932         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1933
1934         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1935         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1936
1937         udelay(40);
1938
1939         return err;
1940 }
1941
1942 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1943 {
1944         u32 adv_reg, all_mask = 0;
1945
1946         if (mask & ADVERTISED_10baseT_Half)
1947                 all_mask |= ADVERTISE_10HALF;
1948         if (mask & ADVERTISED_10baseT_Full)
1949                 all_mask |= ADVERTISE_10FULL;
1950         if (mask & ADVERTISED_100baseT_Half)
1951                 all_mask |= ADVERTISE_100HALF;
1952         if (mask & ADVERTISED_100baseT_Full)
1953                 all_mask |= ADVERTISE_100FULL;
1954
1955         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1956                 return 0;
1957
1958         if ((adv_reg & all_mask) != all_mask)
1959                 return 0;
1960         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1961                 u32 tg3_ctrl;
1962
1963                 all_mask = 0;
1964                 if (mask & ADVERTISED_1000baseT_Half)
1965                         all_mask |= ADVERTISE_1000HALF;
1966                 if (mask & ADVERTISED_1000baseT_Full)
1967                         all_mask |= ADVERTISE_1000FULL;
1968
1969                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1970                         return 0;
1971
1972                 if ((tg3_ctrl & all_mask) != all_mask)
1973                         return 0;
1974         }
1975         return 1;
1976 }
1977
/* Bring up (or re-verify) the link on a copper PHY: clear latched MAC
 * status, apply per-chip PHY workarounds, poll for link, determine the
 * negotiated speed/duplex/flow-control, and program the MAC mode to
 * match.  Reconfigures and restarts the PHY via tg3_phy_copper_begin()
 * when the link is down or the advertisement doesn't match what was
 * requested.  Returns 0 on success or a tg3_init_5401phy_dsp()/
 * tg3_phy_reset() error code.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched link attention bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down: (re)load the 5401 DSP and wait up to
			 * 10ms for link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus
			 * DSP reload if the link still isn't up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of AUX_CTRL shadow 0x4007 is set;
		 * if it wasn't, set it and go straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane BMCR value (0x7fff looks like a bad
		 * read on this hardware — NOTE(review): presumed).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY is in
			 * exactly the forced speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		/* If we are not advertising what has been requested,
		 * bring the link down and reconfigure.
		 */
		if (local_adv !=
		    tg3_advert_flowctrl_1000T(tp->link_config.flowctrl)) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisement / restart autoneg, then check
		 * whether link came right back up.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: clear latched status and
	 * notify the firmware via its mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate link state changes to the net stack and log them. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2252
/* State carried across invocations of the software fiber
 * autonegotiation state machine (tg3_fiber_aneg_smachine).
 */
struct tg3_fiber_aneginfo {
	/* Current state of the autoneg state machine. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* management/status flags (modelled on the clause 37
	 * MR_* variables; LP_ADV bits reflect the link partner).
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters (incremented per state-machine invocation). */
	unsigned long link_time, cur_time;

	/* Last rx config word seen, and how many consecutive times. */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match qualifiers derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	/* Tx/rx 1000BASE-X config words (ANEG_CFG_* bit layout). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must hold before the machine advances past it. */
#define ANEG_STATE_SETTLE_TIME  10000
2316
2317 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2318                                    struct tg3_fiber_aneginfo *ap)
2319 {
2320         u16 flowctrl;
2321         unsigned long delta;
2322         u32 rx_cfg_reg;
2323         int ret;
2324
2325         if (ap->state == ANEG_STATE_UNKNOWN) {
2326                 ap->rxconfig = 0;
2327                 ap->link_time = 0;
2328                 ap->cur_time = 0;
2329                 ap->ability_match_cfg = 0;
2330                 ap->ability_match_count = 0;
2331                 ap->ability_match = 0;
2332                 ap->idle_match = 0;
2333                 ap->ack_match = 0;
2334         }
2335         ap->cur_time++;
2336
2337         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2338                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2339
2340                 if (rx_cfg_reg != ap->ability_match_cfg) {
2341                         ap->ability_match_cfg = rx_cfg_reg;
2342                         ap->ability_match = 0;
2343                         ap->ability_match_count = 0;
2344                 } else {
2345                         if (++ap->ability_match_count > 1) {
2346                                 ap->ability_match = 1;
2347                                 ap->ability_match_cfg = rx_cfg_reg;
2348                         }
2349                 }
2350                 if (rx_cfg_reg & ANEG_CFG_ACK)
2351                         ap->ack_match = 1;
2352                 else
2353                         ap->ack_match = 0;
2354
2355                 ap->idle_match = 0;
2356         } else {
2357                 ap->idle_match = 1;
2358                 ap->ability_match_cfg = 0;
2359                 ap->ability_match_count = 0;
2360                 ap->ability_match = 0;
2361                 ap->ack_match = 0;
2362
2363                 rx_cfg_reg = 0;
2364         }
2365
2366         ap->rxconfig = rx_cfg_reg;
2367         ret = ANEG_OK;
2368
2369         switch(ap->state) {
2370         case ANEG_STATE_UNKNOWN:
2371                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2372                         ap->state = ANEG_STATE_AN_ENABLE;
2373
2374                 /* fallthru */
2375         case ANEG_STATE_AN_ENABLE:
2376                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2377                 if (ap->flags & MR_AN_ENABLE) {
2378                         ap->link_time = 0;
2379                         ap->cur_time = 0;
2380                         ap->ability_match_cfg = 0;
2381                         ap->ability_match_count = 0;
2382                         ap->ability_match = 0;
2383                         ap->idle_match = 0;
2384                         ap->ack_match = 0;
2385
2386                         ap->state = ANEG_STATE_RESTART_INIT;
2387                 } else {
2388                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2389                 }
2390                 break;
2391
2392         case ANEG_STATE_RESTART_INIT:
2393                 ap->link_time = ap->cur_time;
2394                 ap->flags &= ~(MR_NP_LOADED);
2395                 ap->txconfig = 0;
2396                 tw32(MAC_TX_AUTO_NEG, 0);
2397                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2398                 tw32_f(MAC_MODE, tp->mac_mode);
2399                 udelay(40);
2400
2401                 ret = ANEG_TIMER_ENAB;
2402                 ap->state = ANEG_STATE_RESTART;
2403
2404                 /* fallthru */
2405         case ANEG_STATE_RESTART:
2406                 delta = ap->cur_time - ap->link_time;
2407                 if (delta > ANEG_STATE_SETTLE_TIME) {
2408                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2409                 } else {
2410                         ret = ANEG_TIMER_ENAB;
2411                 }
2412                 break;
2413
2414         case ANEG_STATE_DISABLE_LINK_OK:
2415                 ret = ANEG_DONE;
2416                 break;
2417
2418         case ANEG_STATE_ABILITY_DETECT_INIT:
2419                 ap->flags &= ~(MR_TOGGLE_TX);
2420                 ap->txconfig = ANEG_CFG_FD;
2421                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2422                 if (flowctrl & ADVERTISE_1000XPAUSE)
2423                         ap->txconfig |= ANEG_CFG_PS1;
2424                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2425                         ap->txconfig |= ANEG_CFG_PS2;
2426                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2427                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2428                 tw32_f(MAC_MODE, tp->mac_mode);
2429                 udelay(40);
2430
2431                 ap->state = ANEG_STATE_ABILITY_DETECT;
2432                 break;
2433
2434         case ANEG_STATE_ABILITY_DETECT:
2435                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2436                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2437                 }
2438                 break;
2439
2440         case ANEG_STATE_ACK_DETECT_INIT:
2441                 ap->txconfig |= ANEG_CFG_ACK;
2442                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2443                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2444                 tw32_f(MAC_MODE, tp->mac_mode);
2445                 udelay(40);
2446
2447                 ap->state = ANEG_STATE_ACK_DETECT;
2448
2449                 /* fallthru */
2450         case ANEG_STATE_ACK_DETECT:
2451                 if (ap->ack_match != 0) {
2452                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2453                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2454                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2455                         } else {
2456                                 ap->state = ANEG_STATE_AN_ENABLE;
2457                         }
2458                 } else if (ap->ability_match != 0 &&
2459                            ap->rxconfig == 0) {
2460                         ap->state = ANEG_STATE_AN_ENABLE;
2461                 }
2462                 break;
2463
2464         case ANEG_STATE_COMPLETE_ACK_INIT:
2465                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2466                         ret = ANEG_FAILED;
2467                         break;
2468                 }
2469                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2470                                MR_LP_ADV_HALF_DUPLEX |
2471                                MR_LP_ADV_SYM_PAUSE |
2472                                MR_LP_ADV_ASYM_PAUSE |
2473                                MR_LP_ADV_REMOTE_FAULT1 |
2474                                MR_LP_ADV_REMOTE_FAULT2 |
2475                                MR_LP_ADV_NEXT_PAGE |
2476                                MR_TOGGLE_RX |
2477                                MR_NP_RX);
2478                 if (ap->rxconfig & ANEG_CFG_FD)
2479                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2480                 if (ap->rxconfig & ANEG_CFG_HD)
2481                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2482                 if (ap->rxconfig & ANEG_CFG_PS1)
2483                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2484                 if (ap->rxconfig & ANEG_CFG_PS2)
2485                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2486                 if (ap->rxconfig & ANEG_CFG_RF1)
2487                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2488                 if (ap->rxconfig & ANEG_CFG_RF2)
2489                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2490                 if (ap->rxconfig & ANEG_CFG_NP)
2491                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2492
2493                 ap->link_time = ap->cur_time;
2494
2495                 ap->flags ^= (MR_TOGGLE_TX);
2496                 if (ap->rxconfig & 0x0008)
2497                         ap->flags |= MR_TOGGLE_RX;
2498                 if (ap->rxconfig & ANEG_CFG_NP)
2499                         ap->flags |= MR_NP_RX;
2500                 ap->flags |= MR_PAGE_RX;
2501
2502                 ap->state = ANEG_STATE_COMPLETE_ACK;
2503                 ret = ANEG_TIMER_ENAB;
2504                 break;
2505
2506         case ANEG_STATE_COMPLETE_ACK:
2507                 if (ap->ability_match != 0 &&
2508                     ap->rxconfig == 0) {
2509                         ap->state = ANEG_STATE_AN_ENABLE;
2510                         break;
2511                 }
2512                 delta = ap->cur_time - ap->link_time;
2513                 if (delta > ANEG_STATE_SETTLE_TIME) {
2514                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2515                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2516                         } else {
2517                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2518                                     !(ap->flags & MR_NP_RX)) {
2519                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2520                                 } else {
2521                                         ret = ANEG_FAILED;
2522                                 }
2523                         }
2524                 }
2525                 break;
2526
2527         case ANEG_STATE_IDLE_DETECT_INIT:
2528                 ap->link_time = ap->cur_time;
2529                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2530                 tw32_f(MAC_MODE, tp->mac_mode);
2531                 udelay(40);
2532
2533                 ap->state = ANEG_STATE_IDLE_DETECT;
2534                 ret = ANEG_TIMER_ENAB;
2535                 break;
2536
2537         case ANEG_STATE_IDLE_DETECT:
2538                 if (ap->ability_match != 0 &&
2539                     ap->rxconfig == 0) {
2540                         ap->state = ANEG_STATE_AN_ENABLE;
2541                         break;
2542                 }
2543                 delta = ap->cur_time - ap->link_time;
2544                 if (delta > ANEG_STATE_SETTLE_TIME) {
2545                         /* XXX another gem from the Broadcom driver :( */
2546                         ap->state = ANEG_STATE_LINK_OK;
2547                 }
2548                 break;
2549
2550         case ANEG_STATE_LINK_OK:
2551                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2552                 ret = ANEG_DONE;
2553                 break;
2554
2555         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2556                 /* ??? unimplemented */
2557                 break;
2558
2559         case ANEG_STATE_NEXT_PAGE_WAIT:
2560                 /* ??? unimplemented */
2561                 break;
2562
2563         default:
2564                 ret = ANEG_FAILED;
2565                 break;
2566         };
2567
2568         return ret;
2569 }
2570
/* Run the software autonegotiation state machine to completion by
 * polling it roughly every microsecond, bounded at 195000 ticks.
 *
 * @tp: device state
 * @txflags: out - the ANEG_CFG_* config word we advertised
 * @rxflags: out - MR_* flags recording negotiation/link-partner state
 *
 * Returns 1 when the state machine finished (ANEG_DONE) and any of
 * MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set in the
 * resulting flags; 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any stale advertised config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode and start sending config words. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Step the state machine until done/failed or timeout. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Negotiation over; stop transmitting config words. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
2615
/* Reset and configure the BCM8002 SerDes PHY.  The raw register/value
 * pairs below are chip-specific and not publicly documented; the inline
 * comments describe the intent of each write as far as it is known.
 * No-op when the device is already initialized and has lost PCS sync.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2665
/* Bring up the fiber link using the SERDES hardware autoneg engine
 * (SG_DIG registers), with a parallel-detection fallback for partners
 * that do not autonegotiate.
 *
 * @tp: device state
 * @mac_status: caller's snapshot of MAC_STATUS (re-read internally
 *              where freshness matters)
 *
 * Returns 1 if the link is considered up, 0 otherwise.  Flow control is
 * resolved via tg3_setup_flow_control() from the local and link-partner
 * pause advertisements.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below; which value is used depends on
	 * which of the two MACs (port A/B) this is.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	/* Forced mode: turn off HW autoneg if it was on, and declare
	 * link up as soon as PCS sync is seen (no pause negotiated).
	 */
	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold the configured flow-control setting into the pause
	 * capability bits we expect to have programmed.
	 */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we are in parallel-detect mode and still have PCS
		 * sync without config words coming in, keep the link up
		 * while the countdown runs instead of restarting.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset, then program the wanted control. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive local/remote pause
			 * advertisements and resolve flow control.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg not complete: count down, then try
			 * parallel detection before restarting autoneg.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync or signal at all: restart the countdown. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2807
/* Bring up the fiber link using the software autoneg state machine
 * (fiber_autoneg), or force 1000-full-duplex when autoneg is disabled.
 *
 * @tp: device state
 * @mac_status: caller's snapshot of MAC_STATUS
 *
 * Returns 1 if the link is considered up, 0 otherwise.  Requires PCS
 * sync to do anything at all.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the config word we sent and the
			 * MR_* partner flags into MII-style pause
			 * advertisements for flow-control resolution.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stop
		 * re-asserting (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but we still have sync and no config
		 * words arriving: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
2869
/* Top-level link setup for TBI/fiber ports.  Selects hardware
 * (tg3_setup_fiber_hw_autoneg) or software (tg3_setup_fiber_by_hand)
 * negotiation, then updates carrier state, LEDs and the recorded
 * speed/duplex, reporting link changes via tg3_link_report().
 *
 * @tp: device state
 * @force_reset: unused in this function (kept for signature parity
 *               with the other tg3_setup_*_phy variants)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can report only real
	 * changes at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: SW-autoneg device already up with a clean status
	 * (sync + signal, no pending config) -- just ack the change
	 * bits and leave the link alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change events until they stop re-asserting. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	/* Lost PCS sync: link is down.  If autoneg is on and the
	 * countdown expired, pulse SEND_CONFIGS to retry.
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Record the result and drive the link LED accordingly. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier transitions, or a changed pause/speed/duplex
	 * configuration even without a carrier transition.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2977
2978 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2979 {
2980         int current_link_up, err = 0;
2981         u32 bmsr, bmcr;
2982         u16 current_speed;
2983         u8 current_duplex;
2984
2985         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2986         tw32_f(MAC_MODE, tp->mac_mode);
2987         udelay(40);
2988
2989         tw32(MAC_EVENT, 0);
2990
2991         tw32_f(MAC_STATUS,
2992              (MAC_STATUS_SYNC_CHANGED |
2993               MAC_STATUS_CFG_CHANGED |
2994               MAC_STATUS_MI_COMPLETION |
2995               MAC_STATUS_LNKSTATE_CHANGED));
2996         udelay(40);
2997
2998         if (force_reset)
2999                 tg3_phy_reset(tp);
3000
3001         current_link_up = 0;
3002         current_speed = SPEED_INVALID;
3003         current_duplex = DUPLEX_INVALID;
3004
3005         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3006         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3008                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3009                         bmsr |= BMSR_LSTATUS;
3010                 else
3011                         bmsr &= ~BMSR_LSTATUS;
3012         }
3013
3014         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3015
3016         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3017             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3018                 /* do nothing, just check for link up at the end */
3019         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3020                 u32 adv, new_adv;
3021
3022                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3023                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3024                                   ADVERTISE_1000XPAUSE |
3025                                   ADVERTISE_1000XPSE_ASYM |
3026                                   ADVERTISE_SLCT);
3027
3028                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3029
3030                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3031                         new_adv |= ADVERTISE_1000XHALF;
3032                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3033                         new_adv |= ADVERTISE_1000XFULL;
3034
3035                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3036                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3037                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3038                         tg3_writephy(tp, MII_BMCR, bmcr);
3039
3040                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3041                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3042                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3043
3044                         return err;
3045                 }
3046         } else {
3047                 u32 new_bmcr;
3048
3049                 bmcr &= ~BMCR_SPEED1000;
3050                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3051
3052                 if (tp->link_config.duplex == DUPLEX_FULL)
3053                         new_bmcr |= BMCR_FULLDPLX;
3054
3055                 if (new_bmcr != bmcr) {
3056                         /* BMCR_SPEED1000 is a reserved bit that needs
3057                          * to be set on write.
3058                          */
3059                         new_bmcr |= BMCR_SPEED1000;
3060
3061                         /* Force a linkdown */
3062                         if (netif_carrier_ok(tp->dev)) {
3063                                 u32 adv;
3064
3065                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3066                                 adv &= ~(ADVERTISE_1000XFULL |
3067                                          ADVERTISE_1000XHALF |
3068                                          ADVERTISE_SLCT);
3069                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3070                                 tg3_writephy(tp, MII_BMCR, bmcr |
3071                                                            BMCR_ANRESTART |
3072                                                            BMCR_ANENABLE);
3073                                 udelay(10);
3074                                 netif_carrier_off(tp->dev);
3075                         }
3076                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3077                         bmcr = new_bmcr;
3078                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3079                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3080                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3081                             ASIC_REV_5714) {
3082                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3083                                         bmsr |= BMSR_LSTATUS;
3084                                 else
3085                                         bmsr &= ~BMSR_LSTATUS;
3086                         }
3087                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3088                 }
3089         }
3090
3091         if (bmsr & BMSR_LSTATUS) {
3092                 current_speed = SPEED_1000;
3093                 current_link_up = 1;
3094                 if (bmcr & BMCR_FULLDPLX)
3095                         current_duplex = DUPLEX_FULL;
3096                 else
3097                         current_duplex = DUPLEX_HALF;
3098
3099                 if (bmcr & BMCR_ANENABLE) {
3100                         u32 local_adv, remote_adv, common;
3101
3102                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3103                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3104                         common = local_adv & remote_adv;
3105                         if (common & (ADVERTISE_1000XHALF |
3106                                       ADVERTISE_1000XFULL)) {
3107                                 if (common & ADVERTISE_1000XFULL)
3108                                         current_duplex = DUPLEX_FULL;
3109                                 else
3110                                         current_duplex = DUPLEX_HALF;
3111
3112                                 tg3_setup_flow_control(tp, local_adv,
3113                                                        remote_adv);
3114                         }
3115                         else
3116                                 current_link_up = 0;
3117                 }
3118         }
3119
3120         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3121         if (tp->link_config.active_duplex == DUPLEX_HALF)
3122                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3123
3124         tw32_f(MAC_MODE, tp->mac_mode);
3125         udelay(40);
3126
3127         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3128
3129         tp->link_config.active_speed = current_speed;
3130         tp->link_config.active_duplex = current_duplex;
3131
3132         if (current_link_up != netif_carrier_ok(tp->dev)) {
3133                 if (current_link_up)
3134                         netif_carrier_on(tp->dev);
3135                 else {
3136                         netif_carrier_off(tp->dev);
3137                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3138                 }
3139                 tg3_link_report(tp);
3140         }
3141         return err;
3142 }
3143
/* Handle "parallel detection" on SerDes links: if autoneg is enabled
 * but the link partner is not sending config code words, force the
 * link up at 1000/full; once code words are seen again, re-enable
 * autoneg.  Called periodically from the link maintenance path.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read 0x15 twice on purpose -- presumably the
			 * first read returns latched/stale status; confirm
			 * against the PHY datasheet before changing.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3201
3202 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3203 {
3204         int err;
3205
3206         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3207                 err = tg3_setup_fiber_phy(tp, force_reset);
3208         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3209                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3210         } else {
3211                 err = tg3_setup_copper_phy(tp, force_reset);
3212         }
3213
3214         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3215             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3216                 u32 val, scale;
3217
3218                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3219                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3220                         scale = 65;
3221                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3222                         scale = 6;
3223                 else
3224                         scale = 12;
3225
3226                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3227                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3228                 tw32(GRC_MISC_CFG, val);
3229         }
3230
3231         if (tp->link_config.active_speed == SPEED_1000 &&
3232             tp->link_config.active_duplex == DUPLEX_HALF)
3233                 tw32(MAC_TX_LENGTHS,
3234                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3235                       (6 << TX_LENGTHS_IPG_SHIFT) |
3236                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3237         else
3238                 tw32(MAC_TX_LENGTHS,
3239                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3240                       (6 << TX_LENGTHS_IPG_SHIFT) |
3241                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3242
3243         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3244                 if (netif_carrier_ok(tp->dev)) {
3245                         tw32(HOSTCC_STAT_COAL_TICKS,
3246                              tp->coal.stats_block_coalesce_usecs);
3247                 } else {
3248                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3249                 }
3250         }
3251
3252         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3253                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3254                 if (!netif_carrier_ok(tp->dev))
3255                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3256                               tp->pwrmgmt_thresh;
3257                 else
3258                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3259                 tw32(PCIE_PWR_MGMT_THRESH, val);
3260         }
3261
3262         return err;
3263 }
3264
3265 /* This is called whenever we suspect that the system chipset is re-
3266  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3267  * is bogus tx completions. We try to recover by setting the
3268  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3269  * in the workqueue.
3270  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery is only meaningful when the reorder workaround is not
	 * already active and the tx mailbox is written via MMIO (not the
	 * indirect register path).
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Mark recovery pending; tg3_poll() notices this flag and
	 * schedules the reset task to actually reset the chip.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3285
/* Number of free TX descriptors.  The smp_mb() orders the read of
 * tx_prod/tx_cons against prior accesses; see the barrier discussion
 * in tg3_tx() for the pairing with the producer side.
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3292
3293 /* Tigon3 never reports partial packet sends.  So we do not
3294  * need special logic to handle SKBs that have not had all
3295  * of their frags sent yet, like SunGEM does.
3296  */
/* Reap completed TX descriptors up to the hardware consumer index:
 * unmap each skb's head and fragments, free the skb, then advance
 * tp->tx_cons and wake the queue if enough space freed up.  On any
 * ring inconsistency (NULL skb or overrun of hw_idx) the chip is
 * flagged for recovery via tg3_tx_recover().
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the ring state is
		 * corrupt (likely MMIO reordering) -- bail to recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each fragment occupies its own descriptor slot. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Frag slots must be skb-less and must not run
			 * past the hardware index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to avoid racing a concurrent
		 * queue stop in tg3_start_xmit().
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3360
3361 /* Returns size of skb allocated or < 0 on error.
3362  *
3363  * We only need to fill in the address because the other members
3364  * of the RX descriptor are invariant, see tg3_init_rings.
3365  *
3366  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3367  * posting buffers we only dirty the first cache line of the RX
3368  * descriptor (containing the address).  Whereas for the RX status
3369  * buffers the cpu only reads the last cacheline of the RX descriptor
3370  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3371  */
3372 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3373                             int src_idx, u32 dest_idx_unmasked)
3374 {
3375         struct tg3_rx_buffer_desc *desc;
3376         struct ring_info *map, *src_map;
3377         struct sk_buff *skb;
3378         dma_addr_t mapping;
3379         int skb_size, dest_idx;
3380
3381         src_map = NULL;
3382         switch (opaque_key) {
3383         case RXD_OPAQUE_RING_STD:
3384                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3385                 desc = &tp->rx_std[dest_idx];
3386                 map = &tp->rx_std_buffers[dest_idx];
3387                 if (src_idx >= 0)
3388                         src_map = &tp->rx_std_buffers[src_idx];
3389                 skb_size = tp->rx_pkt_buf_sz;
3390                 break;
3391
3392         case RXD_OPAQUE_RING_JUMBO:
3393                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3394                 desc = &tp->rx_jumbo[dest_idx];
3395                 map = &tp->rx_jumbo_buffers[dest_idx];
3396                 if (src_idx >= 0)
3397                         src_map = &tp->rx_jumbo_buffers[src_idx];
3398                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3399                 break;
3400
3401         default:
3402                 return -EINVAL;
3403         };
3404
3405         /* Do not overwrite any of the map or rp information
3406          * until we are sure we can commit to a new buffer.
3407          *
3408          * Callers depend upon this behavior and assume that
3409          * we leave everything unchanged if we fail.
3410          */
3411         skb = netdev_alloc_skb(tp->dev, skb_size);
3412         if (skb == NULL)
3413                 return -ENOMEM;
3414
3415         skb_reserve(skb, tp->rx_offset);
3416
3417         mapping = pci_map_single(tp->pdev, skb->data,
3418                                  skb_size - tp->rx_offset,
3419                                  PCI_DMA_FROMDEVICE);
3420
3421         map->skb = skb;
3422         pci_unmap_addr_set(map, mapping, mapping);
3423
3424         if (src_map != NULL)
3425                 src_map->skb = NULL;
3426
3427         desc->addr_hi = ((u64)mapping >> 32);
3428         desc->addr_lo = ((u64)mapping & 0xffffffff);
3429
3430         return skb_size;
3431 }
3432
3433 /* We only need to move over in the address because the other
3434  * members of the RX descriptor are invariant.  See notes above
3435  * tg3_alloc_rx_skb for full details.
3436  */
3437 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3438                            int src_idx, u32 dest_idx_unmasked)
3439 {
3440         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3441         struct ring_info *src_map, *dest_map;
3442         int dest_idx;
3443
3444         switch (opaque_key) {
3445         case RXD_OPAQUE_RING_STD:
3446                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3447                 dest_desc = &tp->rx_std[dest_idx];
3448                 dest_map = &tp->rx_std_buffers[dest_idx];
3449                 src_desc = &tp->rx_std[src_idx];
3450                 src_map = &tp->rx_std_buffers[src_idx];
3451                 break;
3452
3453         case RXD_OPAQUE_RING_JUMBO:
3454                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3455                 dest_desc = &tp->rx_jumbo[dest_idx];
3456                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3457                 src_desc = &tp->rx_jumbo[src_idx];
3458                 src_map = &tp->rx_jumbo_buffers[src_idx];
3459                 break;
3460
3461         default:
3462                 return;
3463         };
3464
3465         dest_map->skb = src_map->skb;
3466         pci_unmap_addr_set(dest_map, mapping,
3467                            pci_unmap_addr(src_map, mapping));
3468         dest_desc->addr_hi = src_desc->addr_hi;
3469         dest_desc->addr_lo = src_desc->addr_lo;
3470
3471         src_map->skb = NULL;
3472 }
3473
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3480
3481 /* The RX ring scheme is composed of multiple rings which post fresh
3482  * buffers to the chip, and one special ring the chip uses to report
3483  * status back to the host.
3484  *
3485  * The special ring reports the status of received packets to the
3486  * host.  The chip does not write into the original descriptor the
3487  * RX buffer was obtained from.  The chip simply takes the original
3488  * descriptor as provided by the host, updates the status and length
3489  * field, then writes this into the next status ring entry.
3490  *
3491  * Each ring the host uses to post buffers to the chip is described
3492  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3493  * it is first placed into the on-chip ram.  When the packet's length
3494  * is known, it walks down the TG3_BDINFO entries to select the ring.
3495  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3496  * which is within the range of the new packet's length is chosen.
3497  *
3498  * The "separate ring for rx status" scheme may sound queer, but it makes
3499  * sense from a cache coherency perspective.  If only the host writes
3500  * to the buffer post rings, and only the chip writes to the rx status
3501  * rings, then cache lines never move beyond shared-modified state.
3502  * If both the host and chip were to write into the same ring, cache line
3503  * eviction could occur since both entities want it in an exclusive state.
3504  */
/* Process up to @budget packets from the RX return ring (see the ring
 * scheme description above).  For each good packet: large frames get a
 * fresh buffer posted and the original skb passed up; small frames are
 * copied into a new skb and the original buffer recycled.  Finally the
 * return ring is ACKed and the producer ring(s) replenished.
 *
 * Returns the number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring the
		 * buffer came from and its index in that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Post a replacement buffer before handing the
			 * current one up the stack; on allocation failure
			 * the packet is dropped and the buffer recycled.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and give
			 * the original buffer back to the chip.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Accept the hardware checksum only if it is the full
		 * 0xffff TCP/UDP result.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted standard
		 * buffers so it does not run dry inside a long loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure mailbox writes are posted before interrupts re-fire. */
	mmiowb();

	return received;
}
3660
/* One pass of NAPI work: handle link-change events, reap TX
 * completions, then receive packets within the remaining budget.
 * Returns the updated work_done count; if TX recovery was flagged,
 * returns early so tg3_poll() can schedule the reset task.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep UPDATED set. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3694
/* NAPI poll callback: loop doing work until the budget is exhausted
 * or no work remains, then re-enable interrupts.  On a pending TX
 * recovery, complete NAPI and kick the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3735
/* Quiesce the IRQ handler: set irq_sync (which the handlers check via
 * tg3_irq_sync()) and wait for any in-flight handler to finish.  The
 * smp_mb() makes the flag visible before synchronize_irq() returns.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3745
/* Nonzero while the IRQ path is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3750
3751 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3752  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3753  * with as well.  Most of the time, this is not necessary except when
3754  * shutting down the device.
3755  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally also quiesce the IRQ handler (see comment above). */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3762
/* Release the lock taken by tg3_full_lock().  Note: does not undo
 * irq_sync; that is handled elsewhere in the shutdown/restart path.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3767
3768 /* One-shot MSI handler - Chip automatically disables interrupt
3769  * after sending MSI so driver doesn't have to do it.
3770  */
3771 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3772 {
3773         struct net_device *dev = dev_id;
3774         struct tg3 *tp = netdev_priv(dev);
3775
3776         prefetch(tp->hw_status);
3777         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3778
3779         if (likely(!tg3_irq_sync(tp)))
3780                 netif_rx_schedule(dev, &tp->napi);
3781
3782         return IRQ_HANDLED;
3783 }
3784
3785 /* MSI ISR - No need to check for interrupt sharing and no need to
3786  * flush status block and interrupt mailbox. PCI ordering rules
3787  * guarantee that MSI will arrive after the status block.
3788  */
/* Standard MSI handler: ack via the interrupt mailbox (non-zero write
 * also engages in-handler event coalescing), then schedule NAPI
 * unless the IRQ path is quiesced.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3809
/* Legacy INTx interrupt handler (non-tagged status).  Confirms the
 * interrupt is ours, acks it with a flushed mailbox write, and either
 * schedules NAPI or re-enables interrupts if there is no work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3858
/* Tagged-status variant of tg3_interrupt().  The chip stamps each
 * status block with a tag; a block whose tag equals tp->last_tag has
 * already been processed, which replaces the SD_STATUS_UPDATED test
 * used by the untagged handler.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Stale status and the interrupt is not ours
			 * (or the chip is resetting): pass it on.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3906
3907 /* ISR for interrupt test */
3908 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3909 {
3910         struct net_device *dev = dev_id;
3911         struct tg3 *tp = netdev_priv(dev);
3912         struct tg3_hw_status *sblk = tp->hw_status;
3913
3914         if ((sblk->status & SD_STATUS_UPDATED) ||
3915             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3916                 tg3_disable_ints(tp);
3917                 return IRQ_RETVAL(1);
3918         }
3919         return IRQ_RETVAL(0);
3920 }
3921
/* Forward declarations; the definitions appear later in this file. */
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
3924
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success; on failure the
 * interface has been closed and the error from tg3_init_hw() is
 * returned (locks are still held on return either way).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Re-init failed: shut the chip down and take the
		 * interface down.  The locks are dropped around
		 * dev_close() and reacquired afterwards because the
		 * caller expects to still hold them.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3946
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (e.g. netconsole): invoke the interrupt handler
 * directly to process pending events without a hardware interrupt.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3955
/* Workqueue handler that fully halts and reinitializes the chip,
 * scheduled e.g. from tg3_tx_timeout().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	/* Bail out if the interface went down before we ran. */
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	/* Stop the data path outside the locks, then retake them
	 * with irq_sync (second argument 1) for the reset proper.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the one-shot timer-restart request. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Recovering from a tx hang: switch to the flushed
		 * mailbox write routines before restarting the chip.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3996
/* Dump a terse snapshot of the MAC and DMA status registers to the
 * log; used below when diagnosing a transmit timeout.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4004
/* Transmit-watchdog callback: log the event (if tx error messages are
 * enabled) and schedule a full chip reset via tg3_reset_task().
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* Defer the actual reset to process context. */
	schedule_work(&tp->reset_task);
}
4017
4018 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4019 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4020 {
4021         u32 base = (u32) mapping & 0xffffffff;
4022
4023         return ((base > 0xffffdcc0) &&
4024                 (base + len + 8 < base));
4025 }
4026
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only relevant on 64-bit highmem configs, and only for chips
	 * flagged with the 40-bit DMA bug.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
4039
/* Forward declaration; defined below. */
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4041
4042 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4043 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4044                                        u32 last_plus_one, u32 *start,
4045                                        u32 base_flags, u32 mss)
4046 {
4047         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
4048         dma_addr_t new_addr = 0;
4049         u32 entry = *start;
4050         int i, ret = 0;
4051
4052         if (!new_skb) {
4053                 ret = -1;
4054         } else {
4055                 /* New SKB is guaranteed to be linear. */
4056                 entry = *start;
4057                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4058                                           PCI_DMA_TODEVICE);
4059                 /* Make sure new skb does not cross any 4G boundaries.
4060                  * Drop the packet if it does.
4061                  */
4062                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4063                         ret = -1;
4064                         dev_kfree_skb(new_skb);
4065                         new_skb = NULL;
4066                 } else {
4067                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4068                                     base_flags, 1 | (mss << 1));
4069                         *start = NEXT_TX(entry);
4070                 }
4071         }
4072
4073         /* Now clean up the sw ring entries. */
4074         i = 0;
4075         while (entry != last_plus_one) {
4076                 int len;
4077
4078                 if (i == 0)
4079                         len = skb_headlen(skb);
4080                 else
4081                         len = skb_shinfo(skb)->frags[i-1].size;
4082                 pci_unmap_single(tp->pdev,
4083                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4084                                  len, PCI_DMA_TODEVICE);
4085                 if (i == 0) {
4086                         tp->tx_buffers[entry].skb = new_skb;
4087                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4088                 } else {
4089                         tp->tx_buffers[entry].skb = NULL;
4090                 }
4091                 entry = NEXT_TX(entry);
4092                 i++;
4093         }
4094
4095         dev_kfree_skb(skb);
4096
4097         return ret;
4098 }
4099
4100 static void tg3_set_txd(struct tg3 *tp, int entry,
4101                         dma_addr_t mapping, int len, u32 flags,
4102                         u32 mss_and_is_end)
4103 {
4104         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4105         int is_end = (mss_and_is_end & 0x1);
4106         u32 mss = (mss_and_is_end >> 1);
4107         u32 vlan_tag = 0;
4108
4109         if (is_end)
4110                 flags |= TXD_FLAG_END;
4111         if (flags & TXD_FLAG_VLAN) {
4112                 vlan_tag = flags >> 16;
4113                 flags &= 0xffff;
4114         }
4115         vlan_tag |= (mss << TXD_MSS_SHIFT);
4116
4117         txd->addr_hi = ((u64) mapping >> 32);
4118         txd->addr_lo = ((u64) mapping & 0xffffffff);
4119         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4120         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4121 }
4122
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path.  The headers may live in a cloned area;
		 * make them private before modifying them below.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			/* For TCPv6, the header length is encoded in
			 * the mss field above bit 9.
			 */
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Zero the IP checksum and set tot_len to the
			 * per-segment length; header length is encoded
			 * in mss above bit 9 as for v6.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Zeroed for the hardware TSO engine. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the packet's first descriptor carries
			 * the skb pointer, so reclaim frees it once.
			 */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, then re-check in
		 * case tx reclaim freed space concurrently.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	/* No lock is actually taken here (see comment at top); the
	 * label name is historical.  mmiowb() orders the mailbox write
	 * before any following lock release.
	 */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4241
/* Forward declaration: tg3_tso_bug() below resubmits segments through it. */
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4243
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; if reclaim made room in the
		 * meantime, wake the queue and continue.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software (TSO masked off the feature set), then
	 * transmit each resulting skb through the normal path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original skb is consumed either way. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4276
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;		/* set if any mapping trips a DMA bug */

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers may be cloned; privatize before editing. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trigger a TSO hardware bug on
		 * affected chips: fall back to software GSO.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO computes the TCP checksum itself. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header checksum
			 * preloaded in the TCP header.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths (in words); the field
		 * position differs between HW TSO/5705 and the rest.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* skb pointer lives only in the first entry. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			/* Firmware TSO only honors mss on the first
			 * descriptor of a packet.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the packet's first descriptor and let the
		 * workaround re-queue a bounce copy in its place.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop, then re-check against the
		 * wakeup threshold in case reclaim just freed space.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	/* No lock is taken here; label name is historical. */
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4450
4451 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4452                                int new_mtu)
4453 {
4454         dev->mtu = new_mtu;
4455
4456         if (new_mtu > ETH_DATA_LEN) {
4457                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4458                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4459                         ethtool_op_set_tso(dev, 0);
4460                 }
4461                 else
4462                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4463         } else {
4464                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4465                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4466                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4467         }
4468 }
4469
/* ndo change_mtu handler: validate the new MTU and, if the interface
 * is running, restart the chip so the rings match the new frame size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Running: stop traffic, halt the chip, apply the new MTU,
	 * then bring the hardware back up.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4503
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard rx ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: each packet occupies one head entry (which owns the
	 * skb) followed by one entry per fragment, so advance i across
	 * the whole packet before freeing the skb once.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4575
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if no rx buffers at all could be
 * allocated for a required ring.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use jumbo-sized buffers on the standard
	 * ring when the MTU exceeds the normal Ethernet payload.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* opaque tags each descriptor with its ring and index. */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  Partial success
	 * shrinks the pending count; total failure is fatal.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Unlike the standard ring case
					 * above, release everything
					 * allocated so far.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4665
4666 /*
4667  * Must not be invoked with interrupt sources disabled and
4668  * the hardware shutdown down.
4669  */
4670 static void tg3_free_consistent(struct tg3 *tp)
4671 {
4672         kfree(tp->rx_std_buffers);
4673         tp->rx_std_buffers = NULL;
4674         if (tp->rx_std) {
4675                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4676                                     tp->rx_std, tp->rx_std_mapping);
4677                 tp->rx_std = NULL;
4678         }
4679         if (tp->rx_jumbo) {
4680                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4681                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4682                 tp->rx_jumbo = NULL;
4683         }
4684         if (tp->rx_rcb) {
4685                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4686                                     tp->rx_rcb, tp->rx_rcb_mapping);
4687                 tp->rx_rcb = NULL;
4688         }
4689         if (tp->tx_ring) {
4690                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4691                         tp->tx_ring, tp->tx_desc_mapping);
4692                 tp->tx_ring = NULL;
4693         }
4694         if (tp->hw_status) {
4695                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4696                                     tp->hw_status, tp->status_mapping);
4697                 tp->hw_status = NULL;
4698         }
4699         if (tp->hw_stats) {
4700                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4701                                     tp->hw_stats, tp->stats_mapping);
4702                 tp->hw_stats = NULL;
4703         }
4704 }
4705
4706 /*
4707  * Must not be invoked with interrupt sources disabled and
4708  * the hardware shutdown down.  Can sleep.
4709  */
4710 static int tg3_alloc_consistent(struct tg3 *tp)
4711 {
4712         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4713                                       (TG3_RX_RING_SIZE +
4714                                        TG3_RX_JUMBO_RING_SIZE)) +
4715                                      (sizeof(struct tx_ring_info) *
4716                                       TG3_TX_RING_SIZE),
4717                                      GFP_KERNEL);
4718         if (!tp->rx_std_buffers)
4719                 return -ENOMEM;
4720
4721         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4722         tp->tx_buffers = (struct tx_ring_info *)
4723                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4724
4725         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4726                                           &tp->rx_std_mapping);
4727         if (!tp->rx_std)
4728                 goto err_out;
4729
4730         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4731                                             &tp->rx_jumbo_mapping);
4732
4733         if (!tp->rx_jumbo)
4734                 goto err_out;
4735
4736         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4737                                           &tp->rx_rcb_mapping);
4738         if (!tp->rx_rcb)
4739                 goto err_out;
4740
4741         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4742                                            &tp->tx_desc_mapping);
4743         if (!tp->tx_ring)
4744                 goto err_out;
4745
4746         tp->hw_status = pci_alloc_consistent(tp->pdev,
4747                                              TG3_HW_STATUS_SIZE,
4748                                              &tp->status_mapping);
4749         if (!tp->hw_status)
4750                 goto err_out;
4751
4752         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4753                                             sizeof(struct tg3_hw_stats),
4754                                             &tp->stats_mapping);
4755         if (!tp->hw_stats)
4756                 goto err_out;
4757
4758         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4759         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4760
4761         return 0;
4762
4763 err_out:
4764         tg3_free_consistent(tp);
4765         return -ENOMEM;
4766 }
4767
4768 #define MAX_WAIT_CNT 1000
4769
4770 /* To stop a block, clear the enable bit and poll till it
4771  * clears.  tp->lock is held.
4772  */
4773 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4774 {
4775         unsigned int i;
4776         u32 val;
4777
4778         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4779                 switch (ofs) {
4780                 case RCVLSC_MODE:
4781                 case DMAC_MODE:
4782                 case MBFREE_MODE:
4783                 case BUFMGR_MODE:
4784                 case MEMARB_MODE:
4785                         /* We can't enable/disable these bits of the
4786                          * 5705/5750, just say success.
4787                          */
4788                         return 0;
4789
4790                 default:
4791                         break;
4792                 };
4793         }
4794
4795         val = tr32(ofs);
4796         val &= ~enable_bit;
4797         tw32_f(ofs, val);
4798
4799         for (i = 0; i < MAX_WAIT_CNT; i++) {
4800                 udelay(100);
4801                 val = tr32(ofs);
4802                 if ((val & enable_bit) == 0)
4803                         break;
4804         }
4805
4806         if (i == MAX_WAIT_CNT && !silent) {
4807                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4808                        "ofs=%lx enable_bit=%x\n",
4809                        ofs, enable_bit);
4810                 return -ENODEV;
4811         }
4812
4813         return 0;
4814 }
4815
/* Stop all of the chip's receive, transmit, DMA, and housekeeping
 * engines, then clear the host-visible status/statistics blocks.
 * Errors from the individual tg3_stop_block() calls are OR-ed
 * together, so the return value is 0 only if every engine stopped.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop receiving at the MAC before shutting down the RX engines. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks first... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then the send-path and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* MAC_TX_MODE is not covered by tg3_stop_block(); poll it
	 * directly with the same 100us cadence and timeout.
	 */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse FTQ_RESET: assert the full mask, then release. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host status/statistics blocks so stale data is not
	 * seen after the hardware is restarted.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4878
4879 /* tp->lock is held. */
4880 static int tg3_nvram_lock(struct tg3 *tp)
4881 {
4882         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4883                 int i;
4884
4885                 if (tp->nvram_lock_cnt == 0) {
4886                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4887                         for (i = 0; i < 8000; i++) {
4888                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4889                                         break;
4890                                 udelay(20);
4891                         }
4892                         if (i == 8000) {
4893                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4894                                 return -ENODEV;
4895                         }
4896                 }
4897                 tp->nvram_lock_cnt++;
4898         }
4899         return 0;
4900 }
4901
4902 /* tp->lock is held. */
4903 static void tg3_nvram_unlock(struct tg3 *tp)
4904 {
4905         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4906                 if (tp->nvram_lock_cnt > 0)
4907                         tp->nvram_lock_cnt--;
4908                 if (tp->nvram_lock_cnt == 0)
4909                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4910         }
4911 }
4912
4913 /* tp->lock is held. */
4914 static void tg3_enable_nvram_access(struct tg3 *tp)
4915 {
4916         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4917             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4918                 u32 nvaccess = tr32(NVRAM_ACCESS);
4919
4920                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4921         }
4922 }
4923
4924 /* tp->lock is held. */
4925 static void tg3_disable_nvram_access(struct tg3 *tp)
4926 {
4927         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4928             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4929                 u32 nvaccess = tr32(NVRAM_ACCESS);
4930
4931                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4932         }
4933 }
4934
/* Post an event to the APE (management processor) through its shared
 * EVENT_STATUS register.  Silently returns if the APE segment
 * signature or firmware-ready status is absent, or if the APE memory
 * lock cannot be taken.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Queue the new event only once the previous one has
		 * been consumed (PENDING clear), under the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Kick the APE (APE_EVENT_1) only if the event was actually
	 * queued above; on timeout apedata still has PENDING set and
	 * the write is skipped.
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4970
4971 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4972 {
4973         u32 event;
4974         u32 apedata;
4975
4976         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4977                 return;
4978
4979         switch (kind) {
4980                 case RESET_KIND_INIT:
4981                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4982                                         APE_HOST_SEG_SIG_MAGIC);
4983                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4984                                         APE_HOST_SEG_LEN_MAGIC);
4985                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4986                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4987                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4988                                         APE_HOST_DRIVER_ID_MAGIC);
4989                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4990                                         APE_HOST_BEHAV_NO_PHYLOCK);
4991
4992                         event = APE_EVENT_STATUS_STATE_START;
4993                         break;
4994                 case RESET_KIND_SHUTDOWN:
4995                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4996                         break;
4997                 case RESET_KIND_SUSPEND:
4998                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4999                         break;
5000                 default:
5001                         return;
5002         }
5003
5004         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5005
5006         tg3_ape_send_event(tp, event);
5007 }
5008
5009 /* tp->lock is held. */
5010 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5011 {
5012         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5013                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5014
5015         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5016                 switch (kind) {
5017                 case RESET_KIND_INIT:
5018                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5019                                       DRV_STATE_START);
5020                         break;
5021
5022                 case RESET_KIND_SHUTDOWN:
5023                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5024                                       DRV_STATE_UNLOAD);
5025                         break;
5026
5027                 case RESET_KIND_SUSPEND:
5028                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5029                                       DRV_STATE_SUSPEND);
5030                         break;
5031
5032                 default:
5033                         break;
5034                 };
5035         }
5036
5037         if (kind == RESET_KIND_INIT ||
5038             kind == RESET_KIND_SUSPEND)
5039                 tg3_ape_driver_state_change(tp, kind);
5040 }
5041
5042 /* tp->lock is held. */
5043 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5044 {
5045         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5046                 switch (kind) {
5047                 case RESET_KIND_INIT:
5048                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5049                                       DRV_STATE_START_DONE);
5050                         break;
5051
5052                 case RESET_KIND_SHUTDOWN:
5053                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5054                                       DRV_STATE_UNLOAD_DONE);
5055                         break;
5056
5057                 default:
5058                         break;
5059                 };
5060         }
5061
5062         if (kind == RESET_KIND_SHUTDOWN)
5063                 tg3_ape_driver_state_change(tp, kind);
5064 }
5065
5066 /* tp->lock is held. */
5067 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5068 {
5069         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5070                 switch (kind) {
5071                 case RESET_KIND_INIT:
5072                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5073                                       DRV_STATE_START);
5074                         break;
5075
5076                 case RESET_KIND_SHUTDOWN:
5077                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5078                                       DRV_STATE_UNLOAD);
5079                         break;
5080
5081                 case RESET_KIND_SUSPEND:
5082                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5083                                       DRV_STATE_SUSPEND);
5084                         break;
5085
5086                 default:
5087                         break;
5088                 };
5089         }
5090 }
5091
/* Wait for the on-chip firmware to finish initializing after reset.
 *
 * 5906 parts expose an explicit VCPU init-done bit, polled for up to
 * 20ms; timing out there is fatal (-ENODEV).  All other parts poll
 * the firmware mailbox for up to 1 second (100000 * 10us), but a
 * timeout is NOT an error: some boards (e.g. Sun onboard parts)
 * legitimately ship without firmware, so it is only logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  Completion is
	 * signalled by the one's complement of the magic value
	 * appearing in the mailbox.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5130
/* Save the PCI command register before chip reset; the core-clock
 * reset can clear the memory enable bit (see tg3_chip_reset()), and
 * tg3_restore_pci_state() writes this value back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5136
/* Restore PCI state after chip reset.  Re-writes the config-space
 * settings the core-clock reset may have clobbered: indirect access
 * enable, PCI state/retry bits, the saved command register, read
 * request / cacheline / latency settings, PCI-X relaxed ordering,
 * and (on 5780-class chips using MSI) the MSI enable bit.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get a 4K read request size; conventional/PCI-X
	 * parts get their saved cacheline size and latency timer.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5198
5199 static void tg3_stop_fw(struct tg3 *);
5200
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip and bring the host
 * interface back up: save/restore PCI config state, apply several
 * chip-revision-specific reset workarounds, re-enable the memory
 * arbiter, restore MAC_MODE for SERDES parts, wait for firmware via
 * tg3_poll_fw(), and re-probe the ASF enable state from NIC SRAM.
 * Returns 0 on success or the tg3_poll_fw() error.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIE reset-workaround values -- confirm against the
		 * vendor reference before changing.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Flag a driver reset to the 5906 VCPU and release its
		 * halt bit so the internal CPU restarts.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 bit 15 is an
			 * undocumented 5750 A0 erratum workaround.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* MMIO access is safe again; let the irq handler run normally. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter, preserving the prior mode bits
	 * on 5780-class chips.
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-establish the MAC port mode for SERDES parts (presumably
	 * lost across the reset -- confirm); copper parts get 0 here
	 * and are reprogrammed later by the link code.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 is another undocumented
		 * PCIE post-reset setting.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5389
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause: write FWCMD_NICDRV_PAUSE_FW into the
 * firmware command mailbox, set bit 14 of GRC_RX_CPU_EVENT to signal
 * the RX CPU, then poll up to 100us for it to clear the bit.  Skipped
 * entirely when ASF is disabled or the APE manages the device.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* NOTE(review): bit 14 appears to be the driver-event
		 * signal to the RX CPU firmware -- confirm against the
		 * GRC register documentation.
		 */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
5411
/* tp->lock is held.
 *
 * Bring the chip fully down: pause the ASF firmware, write the
 * pre-reset signature for @kind, abort the hardware engines, reset
 * the chip, then write the legacy and post-reset signatures.
 * Returns the tg3_chip_reset() result (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Signatures are written even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5432
/* Layout of the 5701 A0 workaround firmware image below (MIPS code that
 * tg3_load_5701_a0_firmware_fix() downloads into the on-chip CPU scratch
 * memory).  Addresses are the firmware's link-time addresses; only the
 * low 16 bits are used as scratch-memory offsets by the loader.
 * NOTE(review): "RELASE" is a long-standing typo of "RELEASE"; the macro
 * name is kept as-is in case it is referenced elsewhere in the file.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5447
/* .text section of the 5701 A0 workaround firmware: raw MIPS instruction
 * words (opaque blob — do not edit by hand).  Presumably generated from
 * Broadcom's unpublished source per the copyright header — see top of file.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5541
/* .rodata section of the 5701 A0 workaround firmware.  The words are
 * little-endian-packed ASCII string constants used by the firmware
 * (e.g. 0x35373031 = "5701"); opaque blob — do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5549
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the 5701 A0 firmware.  Safe to omit: the loader
 * (tg3_load_firmware_cpu) writes zeros for any section whose data
 * pointer is NULL, and tg3_load_5701_a0_firmware_fix() passes NULL
 * for data_data.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5556
/* On-chip scratch-memory windows into which downloaded RX/TX CPU
 * firmware is written (see tg3_load_firmware_cpu()).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5561
5562 /* tp->lock is held. */
5563 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5564 {
5565         int i;
5566
5567         BUG_ON(offset == TX_CPU_BASE &&
5568             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5569
5570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5571                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5572
5573                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5574                 return 0;
5575         }
5576         if (offset == RX_CPU_BASE) {
5577                 for (i = 0; i < 10000; i++) {
5578                         tw32(offset + CPU_STATE, 0xffffffff);
5579                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5580                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5581                                 break;
5582                 }
5583
5584                 tw32(offset + CPU_STATE, 0xffffffff);
5585                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5586                 udelay(10);
5587         } else {
5588                 for (i = 0; i < 10000; i++) {
5589                         tw32(offset + CPU_STATE, 0xffffffff);
5590                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5591                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5592                                 break;
5593                 }
5594         }
5595
5596         if (i >= 10000) {
5597                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5598                        "and %s CPU\n",
5599                        tp->dev->name,
5600                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5601                 return -ENODEV;
5602         }
5603
5604         /* Clear firmware's nvram arbitration. */
5605         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5606                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5607         return 0;
5608 }
5609
/* Describes one firmware image to be downloaded into a CPU scratch area
 * by tg3_load_firmware_cpu().  For each section, *_base is the
 * firmware's link-time address (only the low 16 bits are used as the
 * scratch offset), *_len is the section length in bytes, and *_data
 * points to the u32 words to write — a NULL data pointer makes the
 * loader fill the section with zeros.
 */
struct fw_info {
	unsigned int text_base;		/* .text link address */
	unsigned int text_len;		/* .text length, bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata link address */
	unsigned int rodata_len;	/* .rodata length, bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data link address */
	unsigned int data_len;		/* .data length, bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5621
5622 /* tp->lock is held. */
5623 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5624                                  int cpu_scratch_size, struct fw_info *info)
5625 {
5626         int err, lock_err, i;
5627         void (*write_op)(struct tg3 *, u32, u32);
5628
5629         if (cpu_base == TX_CPU_BASE &&
5630             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5631                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5632                        "TX cpu firmware on %s which is 5705.\n",
5633                        tp->dev->name);
5634                 return -EINVAL;
5635         }
5636
5637         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5638                 write_op = tg3_write_mem;
5639         else
5640                 write_op = tg3_write_indirect_reg32;
5641
5642         /* It is possible that bootcode is still loading at this point.
5643          * Get the nvram lock first before halting the cpu.
5644          */
5645         lock_err = tg3_nvram_lock(tp);
5646         err = tg3_halt_cpu(tp, cpu_base);
5647         if (!lock_err)
5648                 tg3_nvram_unlock(tp);
5649         if (err)
5650                 goto out;
5651
5652         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5653                 write_op(tp, cpu_scratch_base + i, 0);
5654         tw32(cpu_base + CPU_STATE, 0xffffffff);
5655         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5656         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5657                 write_op(tp, (cpu_scratch_base +
5658                               (info->text_base & 0xffff) +
5659                               (i * sizeof(u32))),
5660                          (info->text_data ?
5661                           info->text_data[i] : 0));
5662         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5663                 write_op(tp, (cpu_scratch_base +
5664                               (info->rodata_base & 0xffff) +
5665                               (i * sizeof(u32))),
5666                          (info->rodata_data ?
5667                           info->rodata_data[i] : 0));
5668         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5669                 write_op(tp, (cpu_scratch_base +
5670                               (info->data_base & 0xffff) +
5671                               (i * sizeof(u32))),
5672                          (info->data_data ?
5673                           info->data_data[i] : 0));
5674
5675         err = 0;
5676
5677 out:
5678         return err;
5679 }
5680
5681 /* tp->lock is held. */
5682 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5683 {
5684         struct fw_info info;
5685         int err, i;
5686
5687         info.text_base = TG3_FW_TEXT_ADDR;
5688         info.text_len = TG3_FW_TEXT_LEN;
5689         info.text_data = &tg3FwText[0];
5690         info.rodata_base = TG3_FW_RODATA_ADDR;
5691         info.rodata_len = TG3_FW_RODATA_LEN;
5692         info.rodata_data = &tg3FwRodata[0];
5693         info.data_base = TG3_FW_DATA_ADDR;
5694         info.data_len = TG3_FW_DATA_LEN;
5695         info.data_data = NULL;
5696
5697         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5698                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5699                                     &info);
5700         if (err)
5701                 return err;
5702
5703         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5704                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5705                                     &info);
5706         if (err)
5707                 return err;
5708
5709         /* Now startup only the RX cpu. */
5710         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5711         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5712
5713         for (i = 0; i < 5; i++) {
5714                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5715                         break;
5716                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5717                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5718                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5719                 udelay(1000);
5720         }
5721         if (i >= 5) {