/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.92"
#define DRV_MODULE_RELDATE	"May 2, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
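
/* Editor's note (illustrative sketch, not driver code): because the ring
 * sizes above are compile-time powers of two, NEXT_TX's mask is the cheap
 * form of a modulo.  For a hypothetical index 'idx':
 *
 *	idx = (idx + 1) % TG3_TX_RING_SIZE;		 hw divide/modulo
 *	idx = (idx + 1) & (TG3_TX_RING_SIZE - 1);	 shift/mask only
 *
 * Both wrap 511 back to 0.  GCC can only do this strength reduction when
 * the ring size is a visible constant, which is why these values stay as
 * macros instead of tp struct members.
 */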

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)		((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
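
/* Editor's note (illustrative sketch, not driver code): the two indirect
 * helpers above tunnel MMIO through PCI config space: TG3PCI_REG_BASE_ADDR
 * selects the target register and TG3PCI_REG_DATA carries the data.  A
 * hypothetical indirect read of the GRC mode register would be:
 *
 *	u32 val = tg3_read_indirect_reg32(tp, GRC_MODE);
 *
 * indirect_lock is what keeps the address write and the data access
 * paired when several contexts share the one window.
 */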

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
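
/* Editor's note (illustrative sketch, not driver code): these accessors
 * differ only in posting behavior.  tw32() may leave the write queued in
 * a PCI bridge; tw32_f() reads the register back to force it out;
 * tw32_wait_f() additionally delays for registers that are unsafe to
 * read back too soon.  Hypothetical uses:
 *
 *	tw32(MAC_LED_CTRL, tp->led_ctrl);		 fire and forget
 *	tw32_f(MAC_MODE, tp->mac_mode);			 flush immediately
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);	 flush + 40 usec
 */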

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
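
/* Editor's note (illustrative sketch, not driver code): NIC SRAM uses the
 * same window idiom, with TG3PCI_MEM_WIN_BASE_ADDR selecting the SRAM
 * offset.  The firmware mailbox traffic later in this file is built on
 * it, e.g.:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *
 * Per the "Always leave this as zero" comments, the window base is
 * parked at zero after every access, presumably so a stray access
 * through the window has a predictable target.
 */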

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
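
/* Editor's note (illustrative sketch, not driver code): callers bracket
 * shared-memory access with the helpers above, as in this hypothetical
 * caller:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch SRAM shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * The 100 x 10 usec poll in tg3_ape_lock() bounds acquisition at about
 * one millisecond before the request is revoked and -EBUSY returned.
 */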

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
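
/* Editor's note (illustrative sketch, not driver code): PHY tweaks in
 * this file are built from the two helpers above as guarded
 * read-modify-write sequences, since MDIO transactions can time out.
 * A hypothetical example that sets one bit in MII_BMCR:
 *
 *	u32 reg;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &reg))
 *		tg3_writephy(tp, MII_BMCR, reg | BMCR_ANENABLE);
 *
 * Note the inverted sense: both helpers return 0 on success, so
 * "!tg3_readphy(...)" means the read completed.
 */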

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* "while (limit--)" leaves limit at -1 only on exhaustion; a
	 * "<= 0" check would misreport a break on the final iteration.
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;

	/* Wait for up to 2.5 seconds (250000 iterations * 10 usec) */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(10);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & TG3_FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & TG3_FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_PAUSE_ASYM) {
		if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = TG3_FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = TG3_FLOW_CTRL_TX;
	}

	return cap;
}
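
/* Editor's note (illustrative sketch, not driver code): the two resolvers
 * above encode the IEEE 802.3 pause-resolution table.  For the copper
 * (1000T) case the outcome by advertised bit is:
 *
 *	local CAP  local ASYM  remote CAP  remote ASYM	result
 *	    1          x           1           x	TX | RX
 *	    1          1           0           1	RX
 *	    0          1           1           1	TX
 *	 (anything else)				none
 *
 * The 1000X variant is the same table spelled with the fiber/SerDes
 * (ADVERTISE_1000XPAUSE / LPA_1000XPAUSE) bit names.
 */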

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
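
/* Editor's note (illustrative sketch, not driver code): the DSP registers
 * are a second level of indirection behind MDIO; one write selects the
 * DSP address, the next moves the data:
 *
 *	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, val);
 *		== tg3_writephy(tp, MII_TG3_DSP_ADDRESS, MII_TG3_DSP_TAP1);
 *		   tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 *
 * The two writes must stay paired; an unrelated MDIO access in between
 * would retarget the data write.
 */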

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	/* As in tg3_bmcr_reset(), limit is -1 only if the loop ran out. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 Mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY. */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
1432         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1433                 /* Cannot do read-modify-write on 5401 */
1434                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1435         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1436                 u32 phy_reg;
1437
1438                 /* Set bit 14 with read-modify-write to preserve other bits */
1439                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1440                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1441                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1442         }
1443
1444         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1445          * jumbo frames transmission.
1446          */
1447         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1448                 u32 phy_reg;
1449
1450                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1451                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1452                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1453         }
1454
1455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1456                 /* adjust output voltage */
1457                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1458         }
1459
1460         tg3_phy_toggle_automdix(tp, 1);
1461         tg3_phy_set_wirespeed(tp);
1462         return 0;
1463 }
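
/* The DSP fixups above all use the PHY's indirect access pattern:
 * select a DSP register through MII_TG3_DSP_ADDRESS, then move data
 * through MII_TG3_DSP_RW_PORT, e.g.:
 *
 *	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, addr);
 *	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 *
 * The specific address/value pairs are evidently vendor-supplied
 * errata fixups and are not otherwise documented here.
 */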
1464
1465 static void tg3_frob_aux_power(struct tg3 *tp)
1466 {
1467         struct tg3 *tp_peer = tp;
1468
1469         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1470                 return;
1471
1472         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1473             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1474                 struct net_device *dev_peer;
1475
1476                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1477                 /* remove_one() may have been run on the peer. */
1478                 if (!dev_peer)
1479                         tp_peer = tp;
1480                 else
1481                         tp_peer = netdev_priv(dev_peer);
1482         }
1483
1484         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1485             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1486             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1487             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1488                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1489                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1490                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1491                                     (GRC_LCLCTRL_GPIO_OE0 |
1492                                      GRC_LCLCTRL_GPIO_OE1 |
1493                                      GRC_LCLCTRL_GPIO_OE2 |
1494                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1495                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1496                                     100);
1497                 } else {
1498                         u32 no_gpio2;
1499                         u32 grc_local_ctrl = 0;
1500
1501                         if (tp_peer != tp &&
1502                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1503                                 return;
1504
1505                         /* Workaround to prevent drawing excess current. */
1506                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1507                             ASIC_REV_5714) {
1508                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1509                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1510                                             grc_local_ctrl, 100);
1511                         }
1512
1513                         /* On 5753 and variants, GPIO2 cannot be used. */
1514                         no_gpio2 = tp->nic_sram_data_cfg &
1515                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1516
1517                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1518                                          GRC_LCLCTRL_GPIO_OE1 |
1519                                          GRC_LCLCTRL_GPIO_OE2 |
1520                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1521                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1522                         if (no_gpio2) {
1523                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1524                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1525                         }
1526                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1527                                                     grc_local_ctrl, 100);
1528
1529                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1530
1531                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1532                                                     grc_local_ctrl, 100);
1533
1534                         if (!no_gpio2) {
1535                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1536                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1537                                             grc_local_ctrl, 100);
1538                         }
1539                 }
1540         } else {
1541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1542                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1543                         if (tp_peer != tp &&
1544                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1545                                 return;
1546
1547                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1548                                     (GRC_LCLCTRL_GPIO_OE1 |
1549                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1550
1551                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1552                                     GRC_LCLCTRL_GPIO_OE1, 100);
1553
1554                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1555                                     (GRC_LCLCTRL_GPIO_OE1 |
1556                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1557                 }
1558         }
1559 }
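
/* tg3_frob_aux_power() staggers the GRC GPIO writes (output enables
 * first, then the output levels) with ~100us waits via tw32_wait_f(),
 * so that two NICs sharing the auxiliary supply do not switch their
 * power GPIOs simultaneously and overdraw it.
 */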
1560
1561 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1562 {
1563         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1564                 return 1;
1565         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1566                 if (speed != SPEED_10)
1567                         return 1;
1568         } else if (speed == SPEED_10)
1569                 return 1;
1570
1571         return 0;
1572 }
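
/* tg3_5700_link_polarity() reports whether MAC_MODE_LINK_POLARITY
 * should be set for the given link speed; the 5700-specific MAC_MODE
 * programming below relies on it.
 */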
1573
1574 static int tg3_setup_phy(struct tg3 *, int);
1575
1576 #define RESET_KIND_SHUTDOWN     0
1577 #define RESET_KIND_INIT         1
1578 #define RESET_KIND_SUSPEND      2
1579
1580 static void tg3_write_sig_post_reset(struct tg3 *, int);
1581 static int tg3_halt_cpu(struct tg3 *, u32);
1582 static int tg3_nvram_lock(struct tg3 *);
1583 static void tg3_nvram_unlock(struct tg3 *);
1584
1585 static void tg3_power_down_phy(struct tg3 *tp)
1586 {
1587         u32 val;
1588
1589         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1590                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1591                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1592                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1593
1594                         sg_dig_ctrl |=
1595                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1596                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1597                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1598                 }
1599                 return;
1600         }
1601
1602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1603                 tg3_bmcr_reset(tp);
1604                 val = tr32(GRC_MISC_CFG);
1605                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1606                 udelay(40);
1607                 return;
1608         } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1609                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1610                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1611                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1612         }
1613
1614         /* The PHY should not be powered down on some chips because
1615          * of bugs.
1616          */
1617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1619             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1620              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1621                 return;
1622
1623         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1624                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1625                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1626                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1627                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1628         }
1629
1630         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1631 }
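
/* Note the ordering in tg3_power_down_phy(): SerDes and chip-specific
 * quirks are handled first, the chips where powering down the PHY is
 * known to be unsafe bail out early, and the generic BMCR_PDOWN write
 * is always the final step.
 */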
1632
1633 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1634 {
1635         u32 misc_host_ctrl;
1636         u16 power_control, power_caps;
1637         int pm = tp->pm_cap;
1638
1639         /* Make sure register accesses (indirect or otherwise)
1640          * will function correctly.
1641          */
1642         pci_write_config_dword(tp->pdev,
1643                                TG3PCI_MISC_HOST_CTRL,
1644                                tp->misc_host_ctrl);
1645
1646         pci_read_config_word(tp->pdev,
1647                              pm + PCI_PM_CTRL,
1648                              &power_control);
1649         power_control |= PCI_PM_CTRL_PME_STATUS;
1650         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1651         switch (state) {
1652         case PCI_D0:
1653                 power_control |= 0;
1654                 pci_write_config_word(tp->pdev,
1655                                       pm + PCI_PM_CTRL,
1656                                       power_control);
1657                 udelay(100);    /* Delay after power state change */
1658
1659                 /* Switch out of Vaux if it is a NIC */
1660                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1661                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1662
1663                 return 0;
1664
1665         case PCI_D1:
1666                 power_control |= 1;
1667                 break;
1668
1669         case PCI_D2:
1670                 power_control |= 2;
1671                 break;
1672
1673         case PCI_D3hot:
1674                 power_control |= 3;
1675                 break;
1676
1677         default:
1678                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1679                        "requested.\n",
1680                        tp->dev->name, state);
1681                 return -EINVAL;
1682         }
1683
1684         power_control |= PCI_PM_CTRL_PME_ENABLE;
1685
1686         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1687         tw32(TG3PCI_MISC_HOST_CTRL,
1688              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1689
1690         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1691                 tp->link_config.phy_is_low_power = 1;
1692         } else {
1693                 if (tp->link_config.phy_is_low_power == 0) {
1694                         tp->link_config.phy_is_low_power = 1;
1695                         tp->link_config.orig_speed = tp->link_config.speed;
1696                         tp->link_config.orig_duplex = tp->link_config.duplex;
1697                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
1698                 }
1699
1700                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1701                         tp->link_config.speed = SPEED_10;
1702                         tp->link_config.duplex = DUPLEX_HALF;
1703                         tp->link_config.autoneg = AUTONEG_ENABLE;
1704                         tg3_setup_phy(tp, 0);
1705                 }
1706         }
1707
1708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1709                 u32 val;
1710
1711                 val = tr32(GRC_VCPU_EXT_CTRL);
1712                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1713         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1714                 int i;
1715                 u32 val;
1716
1717                 for (i = 0; i < 200; i++) {
1718                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1719                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1720                                 break;
1721                         msleep(1);
1722                 }
1723         }
1724         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1725                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1726                                                      WOL_DRV_STATE_SHUTDOWN |
1727                                                      WOL_DRV_WOL |
1728                                                      WOL_SET_MAGIC_PKT);
1729
1730         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1731
1732         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1733                 u32 mac_mode;
1734
1735                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1736                         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1737                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1738                                 udelay(40);
1739                         }
1740
1741                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1742                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1743                         else
1744                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1745
1746                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1747                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1748                             ASIC_REV_5700) {
1749                                 u32 speed = (tp->tg3_flags &
1750                                              TG3_FLAG_WOL_SPEED_100MB) ?
1751                                              SPEED_100 : SPEED_10;
1752                                 if (tg3_5700_link_polarity(tp, speed))
1753                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1754                                 else
1755                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1756                         }
1757                 } else {
1758                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1759                 }
1760
1761                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1762                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1763
1764                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1765                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1766                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1767
1768                 tw32_f(MAC_MODE, mac_mode);
1769                 udelay(100);
1770
1771                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1772                 udelay(10);
1773         }
1774
1775         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1776             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1777              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1778                 u32 base_val;
1779
1780                 base_val = tp->pci_clock_ctrl;
1781                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1782                              CLOCK_CTRL_TXCLK_DISABLE);
1783
1784                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1785                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1786         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1787                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1788                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1789                 /* do nothing */
1790         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1791                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1792                 u32 newbits1, newbits2;
1793
1794                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1795                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1796                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1797                                     CLOCK_CTRL_TXCLK_DISABLE |
1798                                     CLOCK_CTRL_ALTCLK);
1799                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1800                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1801                         newbits1 = CLOCK_CTRL_625_CORE;
1802                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1803                 } else {
1804                         newbits1 = CLOCK_CTRL_ALTCLK;
1805                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1806                 }
1807
1808                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1809                             40);
1810
1811                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1812                             40);
1813
1814                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1815                         u32 newbits3;
1816
1817                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1818                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1819                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1820                                             CLOCK_CTRL_TXCLK_DISABLE |
1821                                             CLOCK_CTRL_44MHZ_CORE);
1822                         } else {
1823                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1824                         }
1825
1826                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1827                                     tp->pci_clock_ctrl | newbits3, 40);
1828                 }
1829         }
1830
1831         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1832             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1833             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1834                 tg3_power_down_phy(tp);
1835
1836         tg3_frob_aux_power(tp);
1837
1838         /* Workaround for unstable PLL clock */
1839         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1840             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1841                 u32 val = tr32(0x7d00);
1842
1843                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1844                 tw32(0x7d00, val);
1845                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1846                         int err;
1847
1848                         err = tg3_nvram_lock(tp);
1849                         tg3_halt_cpu(tp, RX_CPU_BASE);
1850                         if (!err)
1851                                 tg3_nvram_unlock(tp);
1852                 }
1853         }
1854
1855         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1856
1857         /* Finally, set the new power state. */
1858         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1859         udelay(100);    /* Delay after power state change */
1860
1861         return 0;
1862 }
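
/* A note on the power_control arithmetic above: the low two bits of
 * the PCI PM control/status register encode the target D-state
 * directly (D0 = 0 through D3hot = 3), which is why each case simply
 * ORs the numeric state into the register image, roughly:
 *
 *	power_control &= ~PCI_PM_CTRL_STATE_MASK;
 *	power_control |= 3;		(i.e. PCI_D3hot)
 *
 * followed by a ~100us settle delay after the config-space write.
 */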
1863
1864 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1865 {
1866         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1867         case MII_TG3_AUX_STAT_10HALF:
1868                 *speed = SPEED_10;
1869                 *duplex = DUPLEX_HALF;
1870                 break;
1871
1872         case MII_TG3_AUX_STAT_10FULL:
1873                 *speed = SPEED_10;
1874                 *duplex = DUPLEX_FULL;
1875                 break;
1876
1877         case MII_TG3_AUX_STAT_100HALF:
1878                 *speed = SPEED_100;
1879                 *duplex = DUPLEX_HALF;
1880                 break;
1881
1882         case MII_TG3_AUX_STAT_100FULL:
1883                 *speed = SPEED_100;
1884                 *duplex = DUPLEX_FULL;
1885                 break;
1886
1887         case MII_TG3_AUX_STAT_1000HALF:
1888                 *speed = SPEED_1000;
1889                 *duplex = DUPLEX_HALF;
1890                 break;
1891
1892         case MII_TG3_AUX_STAT_1000FULL:
1893                 *speed = SPEED_1000;
1894                 *duplex = DUPLEX_FULL;
1895                 break;
1896
1897         default:
1898                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1899                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1900                                  SPEED_10;
1901                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1902                                   DUPLEX_HALF;
1903                         break;
1904                 }
1905                 *speed = SPEED_INVALID;
1906                 *duplex = DUPLEX_INVALID;
1907                 break;
1908         }
1909 }
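
/* The speed/duplex encoding decoded above is Broadcom-specific.  The
 * 5906's embedded 10/100 PHY reports its status through the separate
 * MII_TG3_AUX_STAT_100/_FULL bits, hence the fallback in the default
 * case.
 */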
1910
1911 static void tg3_phy_copper_begin(struct tg3 *tp)
1912 {
1913         u32 new_adv;
1914         int i;
1915
1916         if (tp->link_config.phy_is_low_power) {
1917                 /* Entering low power mode.  Disable gigabit and
1918                  * 100baseT advertisements.
1919                  */
1920                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1921
1922                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1923                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1924                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1925                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1926
1927                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1928         } else if (tp->link_config.speed == SPEED_INVALID) {
1929                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1930                         tp->link_config.advertising &=
1931                                 ~(ADVERTISED_1000baseT_Half |
1932                                   ADVERTISED_1000baseT_Full);
1933
1934                 new_adv = ADVERTISE_CSMA;
1935                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1936                         new_adv |= ADVERTISE_10HALF;
1937                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1938                         new_adv |= ADVERTISE_10FULL;
1939                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1940                         new_adv |= ADVERTISE_100HALF;
1941                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1942                         new_adv |= ADVERTISE_100FULL;
1943
1944                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1945
1946                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1947
1948                 if (tp->link_config.advertising &
1949                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1950                         new_adv = 0;
1951                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1952                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1953                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1954                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1955                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1956                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1957                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1958                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1959                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1960                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1961                 } else {
1962                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1963                 }
1964         } else {
1965                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1966                 new_adv |= ADVERTISE_CSMA;
1967
1968                 /* Asking for a specific link mode. */
1969                 if (tp->link_config.speed == SPEED_1000) {
1970                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1971
1972                         if (tp->link_config.duplex == DUPLEX_FULL)
1973                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1974                         else
1975                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1976                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1977                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1978                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1979                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1980                 } else {
1981                         if (tp->link_config.speed == SPEED_100) {
1982                                 if (tp->link_config.duplex == DUPLEX_FULL)
1983                                         new_adv |= ADVERTISE_100FULL;
1984                                 else
1985                                         new_adv |= ADVERTISE_100HALF;
1986                         } else {
1987                                 if (tp->link_config.duplex == DUPLEX_FULL)
1988                                         new_adv |= ADVERTISE_10FULL;
1989                                 else
1990                                         new_adv |= ADVERTISE_10HALF;
1991                         }
1992                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1993
1994                         new_adv = 0;
1995                 }
1996
1997                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1998         }
1999
2000         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2001             tp->link_config.speed != SPEED_INVALID) {
2002                 u32 bmcr, orig_bmcr;
2003
2004                 tp->link_config.active_speed = tp->link_config.speed;
2005                 tp->link_config.active_duplex = tp->link_config.duplex;
2006
2007                 bmcr = 0;
2008                 switch (tp->link_config.speed) {
2009                 default:
2010                 case SPEED_10:
2011                         break;
2012
2013                 case SPEED_100:
2014                         bmcr |= BMCR_SPEED100;
2015                         break;
2016
2017                 case SPEED_1000:
2018                         bmcr |= TG3_BMCR_SPEED1000;
2019                         break;
2020                 }
2021
2022                 if (tp->link_config.duplex == DUPLEX_FULL)
2023                         bmcr |= BMCR_FULLDPLX;
2024
2025                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2026                     (bmcr != orig_bmcr)) {
2027                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2028                         for (i = 0; i < 1500; i++) {
2029                                 u32 tmp;
2030
2031                                 udelay(10);
2032                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2033                                     tg3_readphy(tp, MII_BMSR, &tmp))
2034                                         continue;
2035                                 if (!(tmp & BMSR_LSTATUS)) {
2036                                         udelay(40);
2037                                         break;
2038                                 }
2039                         }
2040                         tg3_writephy(tp, MII_BMCR, bmcr);
2041                         udelay(40);
2042                 }
2043         } else {
2044                 tg3_writephy(tp, MII_BMCR,
2045                              BMCR_ANENABLE | BMCR_ANRESTART);
2046         }
2047 }
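
/* When forcing speed/duplex above, the driver first writes
 * BMCR_LOOPBACK and polls BMSR until the link drops before writing
 * the real BMCR value, evidently so the new mode is applied from a
 * clean link-down state.
 */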
2048
2049 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2050 {
2051         int err;
2052
2053         /* Turn off tap power management. */
2054         /* Set Extended packet length bit */
2055         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2056
2057         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2058         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2059
2060         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2061         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2062
2063         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2064         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2065
2066         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2067         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2068
2069         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2070         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2071
2072         udelay(40);
2073
2074         return err;
2075 }
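
/* The individual write errors above are OR-ed together, so a nonzero
 * return from tg3_init_5401phy_dsp() only says that at least one DSP
 * write failed, not which one.
 */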
2076
2077 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2078 {
2079         u32 adv_reg, all_mask = 0;
2080
2081         if (mask & ADVERTISED_10baseT_Half)
2082                 all_mask |= ADVERTISE_10HALF;
2083         if (mask & ADVERTISED_10baseT_Full)
2084                 all_mask |= ADVERTISE_10FULL;
2085         if (mask & ADVERTISED_100baseT_Half)
2086                 all_mask |= ADVERTISE_100HALF;
2087         if (mask & ADVERTISED_100baseT_Full)
2088                 all_mask |= ADVERTISE_100FULL;
2089
2090         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2091                 return 0;
2092
2093         if ((adv_reg & all_mask) != all_mask)
2094                 return 0;
2095         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2096                 u32 tg3_ctrl;
2097
2098                 all_mask = 0;
2099                 if (mask & ADVERTISED_1000baseT_Half)
2100                         all_mask |= ADVERTISE_1000HALF;
2101                 if (mask & ADVERTISED_1000baseT_Full)
2102                         all_mask |= ADVERTISE_1000FULL;
2103
2104                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2105                         return 0;
2106
2107                 if ((tg3_ctrl & all_mask) != all_mask)
2108                         return 0;
2109         }
2110         return 1;
2111 }
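
/* tg3_copper_is_advertising_all() returns 1 only when every mode in
 * 'mask' is present in the PHY's advertisement registers.  A 0 return
 * makes tg3_setup_copper_phy() treat the link as stale and restart
 * autonegotiation with corrected advertisements.
 */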
2112
2113 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2114 {
2115         u32 curadv, reqadv;
2116
2117         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2118                 return 1;
2119
2120         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2121         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2122
2123         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2124                 if (curadv != reqadv)
2125                         return 0;
2126
2127                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2128                         tg3_readphy(tp, MII_LPA, rmtadv);
2129         } else {
2130                 /* Reprogram the advertisement register, even if it
2131                  * does not affect the current link.  If the link
2132                  * gets renegotiated in the future, we can save an
2133                  * additional renegotiation cycle by advertising
2134                  * it correctly in the first place.
2135                  */
2136                 if (curadv != reqadv) {
2137                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2138                                      ADVERTISE_PAUSE_ASYM);
2139                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2140                 }
2141         }
2142
2143         return 1;
2144 }
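
/* Note the return convention above: 0 is returned only when an active
 * full-duplex link was negotiated with pause bits that do not match
 * the requested flow control, forcing a renegotiation.  All other
 * paths return 1, reprogramming the advertisement for the next
 * negotiation cycle when needed.
 */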
2145
2146 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2147 {
2148         int current_link_up;
2149         u32 bmsr, dummy;
2150         u32 lcl_adv, rmt_adv;
2151         u16 current_speed;
2152         u8 current_duplex;
2153         int i, err;
2154
2155         tw32(MAC_EVENT, 0);
2156
2157         tw32_f(MAC_STATUS,
2158              (MAC_STATUS_SYNC_CHANGED |
2159               MAC_STATUS_CFG_CHANGED |
2160               MAC_STATUS_MI_COMPLETION |
2161               MAC_STATUS_LNKSTATE_CHANGED));
2162         udelay(40);
2163
2164         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2165                 tw32_f(MAC_MI_MODE,
2166                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2167                 udelay(80);
2168         }
2169
2170         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2171
2172         /* Some third-party PHYs need to be reset on link going
2173          * down.
2174          */
2175         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2176              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2177              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2178             netif_carrier_ok(tp->dev)) {
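                /* BMSR latches link-down events: the first read
                 * clears the latched status, the second reflects the
                 * current link state.
                 */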
2179                 tg3_readphy(tp, MII_BMSR, &bmsr);
2180                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2181                     !(bmsr & BMSR_LSTATUS))
2182                         force_reset = 1;
2183         }
2184         if (force_reset)
2185                 tg3_phy_reset(tp);
2186
2187         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2188                 tg3_readphy(tp, MII_BMSR, &bmsr);
2189                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2190                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2191                         bmsr = 0;
2192
2193                 if (!(bmsr & BMSR_LSTATUS)) {
2194                         err = tg3_init_5401phy_dsp(tp);
2195                         if (err)
2196                                 return err;
2197
2198                         tg3_readphy(tp, MII_BMSR, &bmsr);
2199                         for (i = 0; i < 1000; i++) {
2200                                 udelay(10);
2201                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2202                                     (bmsr & BMSR_LSTATUS)) {
2203                                         udelay(40);
2204                                         break;
2205                                 }
2206                         }
2207
2208                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2209                             !(bmsr & BMSR_LSTATUS) &&
2210                             tp->link_config.active_speed == SPEED_1000) {
2211                                 err = tg3_phy_reset(tp);
2212                                 if (!err)
2213                                         err = tg3_init_5401phy_dsp(tp);
2214                                 if (err)
2215                                         return err;
2216                         }
2217                 }
2218         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2219                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2220                 /* 5701 {A0,B0} CRC bug workaround */
2221                 tg3_writephy(tp, 0x15, 0x0a75);
2222                 tg3_writephy(tp, 0x1c, 0x8c68);
2223                 tg3_writephy(tp, 0x1c, 0x8d68);
2224                 tg3_writephy(tp, 0x1c, 0x8c68);
2225         }
2226
2227         /* Clear pending interrupts... */
2228         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2229         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2230
2231         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2232                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2233         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2234                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2235
2236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2237             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2238                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2239                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2240                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2241                 else
2242                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2243         }
2244
2245         current_link_up = 0;
2246         current_speed = SPEED_INVALID;
2247         current_duplex = DUPLEX_INVALID;
2248
2249         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2250                 u32 val;
2251
2252                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2253                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2254                 if (!(val & (1 << 10))) {
2255                         val |= (1 << 10);
2256                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2257                         goto relink;
2258                 }
2259         }
2260
2261         bmsr = 0;
2262         for (i = 0; i < 100; i++) {
2263                 tg3_readphy(tp, MII_BMSR, &bmsr);
2264                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2265                     (bmsr & BMSR_LSTATUS))
2266                         break;
2267                 udelay(40);
2268         }
2269
2270         if (bmsr & BMSR_LSTATUS) {
2271                 u32 aux_stat, bmcr;
2272
2273                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2274                 for (i = 0; i < 2000; i++) {
2275                         udelay(10);
2276                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2277                             aux_stat)
2278                                 break;
2279                 }
2280
2281                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2282                                              &current_speed,
2283                                              &current_duplex);
2284
2285                 bmcr = 0;
2286                 for (i = 0; i < 200; i++) {
2287                         tg3_readphy(tp, MII_BMCR, &bmcr);
2288                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2289                                 continue;
2290                         if (bmcr && bmcr != 0x7fff)
2291                                 break;
2292                         udelay(10);
2293                 }
2294
2295                 lcl_adv = 0;
2296                 rmt_adv = 0;
2297
2298                 tp->link_config.active_speed = current_speed;
2299                 tp->link_config.active_duplex = current_duplex;
2300
2301                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2302                         if ((bmcr & BMCR_ANENABLE) &&
2303                             tg3_copper_is_advertising_all(tp,
2304                                                 tp->link_config.advertising)) {
2305                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2306                                                                   &rmt_adv))
2307                                         current_link_up = 1;
2308                         }
2309                 } else {
2310                         if (!(bmcr & BMCR_ANENABLE) &&
2311                             tp->link_config.speed == current_speed &&
2312                             tp->link_config.duplex == current_duplex &&
2313                             tp->link_config.flowctrl ==
2314                             tp->link_config.active_flowctrl) {
2315                                 current_link_up = 1;
2316                         }
2317                 }
2318
2319                 if (current_link_up == 1 &&
2320                     tp->link_config.active_duplex == DUPLEX_FULL)
2321                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2322         }
2323
2324 relink:
2325         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2326                 u32 tmp;
2327
2328                 tg3_phy_copper_begin(tp);
2329
2330                 tg3_readphy(tp, MII_BMSR, &tmp);
2331                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2332                     (tmp & BMSR_LSTATUS))
2333                         current_link_up = 1;
2334         }
2335
2336         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2337         if (current_link_up == 1) {
2338                 if (tp->link_config.active_speed == SPEED_100 ||
2339                     tp->link_config.active_speed == SPEED_10)
2340                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2341                 else
2342                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2343         } else
2344                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2345
2346         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2347         if (tp->link_config.active_duplex == DUPLEX_HALF)
2348                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2349
2350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2351                 if (current_link_up == 1 &&
2352                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2353                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2354                 else
2355                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2356         }
2357
2358         /* ??? Without this setting Netgear GA302T PHY does not
2359          * ??? send/receive packets...
2360          */
2361         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2362             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2363                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2364                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2365                 udelay(80);
2366         }
2367
2368         tw32_f(MAC_MODE, tp->mac_mode);
2369         udelay(40);
2370
2371         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2372                 /* Polled via timer. */
2373                 tw32_f(MAC_EVENT, 0);
2374         } else {
2375                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2376         }
2377         udelay(40);
2378
2379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2380             current_link_up == 1 &&
2381             tp->link_config.active_speed == SPEED_1000 &&
2382             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2383              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2384                 udelay(120);
2385                 tw32_f(MAC_STATUS,
2386                      (MAC_STATUS_SYNC_CHANGED |
2387                       MAC_STATUS_CFG_CHANGED));
2388                 udelay(40);
2389                 tg3_write_mem(tp,
2390                               NIC_SRAM_FIRMWARE_MBOX,
2391                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2392         }
2393
2394         if (current_link_up != netif_carrier_ok(tp->dev)) {
2395                 if (current_link_up)
2396                         netif_carrier_on(tp->dev);
2397                 else
2398                         netif_carrier_off(tp->dev);
2399                 tg3_link_report(tp);
2400         }
2401
2402         return 0;
2403 }
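
/* tg3_setup_copper_phy() in summary: clear stale MAC/PHY status,
 * apply per-chip PHY workarounds, sample BMSR and AUX_STAT to decide
 * whether the existing link matches the requested configuration, and
 * restart autonegotiation via tg3_phy_copper_begin() when it does
 * not.  Carrier state is only updated when the computed link state
 * actually changes.
 */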
2404
2405 struct tg3_fiber_aneginfo {
2406         int state;
2407 #define ANEG_STATE_UNKNOWN              0
2408 #define ANEG_STATE_AN_ENABLE            1
2409 #define ANEG_STATE_RESTART_INIT         2
2410 #define ANEG_STATE_RESTART              3
2411 #define ANEG_STATE_DISABLE_LINK_OK      4
2412 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2413 #define ANEG_STATE_ABILITY_DETECT       6
2414 #define ANEG_STATE_ACK_DETECT_INIT      7
2415 #define ANEG_STATE_ACK_DETECT           8
2416 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2417 #define ANEG_STATE_COMPLETE_ACK         10
2418 #define ANEG_STATE_IDLE_DETECT_INIT     11
2419 #define ANEG_STATE_IDLE_DETECT          12
2420 #define ANEG_STATE_LINK_OK              13
2421 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2422 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2423
2424         u32 flags;
2425 #define MR_AN_ENABLE            0x00000001
2426 #define MR_RESTART_AN           0x00000002
2427 #define MR_AN_COMPLETE          0x00000004
2428 #define MR_PAGE_RX              0x00000008
2429 #define MR_NP_LOADED            0x00000010
2430 #define MR_TOGGLE_TX            0x00000020
2431 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2432 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2433 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2434 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2435 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2436 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2437 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2438 #define MR_TOGGLE_RX            0x00002000
2439 #define MR_NP_RX                0x00004000
2440
2441 #define MR_LINK_OK              0x80000000
2442
2443         unsigned long link_time, cur_time;
2444
2445         u32 ability_match_cfg;
2446         int ability_match_count;
2447
2448         char ability_match, idle_match, ack_match;
2449
2450         u32 txconfig, rxconfig;
2451 #define ANEG_CFG_NP             0x00000080
2452 #define ANEG_CFG_ACK            0x00000040
2453 #define ANEG_CFG_RF2            0x00000020
2454 #define ANEG_CFG_RF1            0x00000010
2455 #define ANEG_CFG_PS2            0x00000001
2456 #define ANEG_CFG_PS1            0x00008000
2457 #define ANEG_CFG_HD             0x00004000
2458 #define ANEG_CFG_FD             0x00002000
2459 #define ANEG_CFG_INVAL          0x00001f06
2460
2461 };
2462 #define ANEG_OK         0
2463 #define ANEG_DONE       1
2464 #define ANEG_TIMER_ENAB 2
2465 #define ANEG_FAILED     -1
2466
2467 #define ANEG_STATE_SETTLE_TIME  10000
2468
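/* The state machine below is essentially a software rendering of the
 * IEEE 802.3 clause 37 (1000BASE-X) autonegotiation arbitration
 * diagram, driven by fiber_autoneg() at roughly one tick per
 * microsecond; ANEG_STATE_SETTLE_TIME therefore corresponds to about
 * 10ms of settle time.
 */
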
2469 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2470                                    struct tg3_fiber_aneginfo *ap)
2471 {
2472         u16 flowctrl;
2473         unsigned long delta;
2474         u32 rx_cfg_reg;
2475         int ret;
2476
2477         if (ap->state == ANEG_STATE_UNKNOWN) {
2478                 ap->rxconfig = 0;
2479                 ap->link_time = 0;
2480                 ap->cur_time = 0;
2481                 ap->ability_match_cfg = 0;
2482                 ap->ability_match_count = 0;
2483                 ap->ability_match = 0;
2484                 ap->idle_match = 0;
2485                 ap->ack_match = 0;
2486         }
2487         ap->cur_time++;
2488
2489         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2490                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2491
2492                 if (rx_cfg_reg != ap->ability_match_cfg) {
2493                         ap->ability_match_cfg = rx_cfg_reg;
2494                         ap->ability_match = 0;
2495                         ap->ability_match_count = 0;
2496                 } else {
2497                         if (++ap->ability_match_count > 1) {
2498                                 ap->ability_match = 1;
2499                                 ap->ability_match_cfg = rx_cfg_reg;
2500                         }
2501                 }
2502                 if (rx_cfg_reg & ANEG_CFG_ACK)
2503                         ap->ack_match = 1;
2504                 else
2505                         ap->ack_match = 0;
2506
2507                 ap->idle_match = 0;
2508         } else {
2509                 ap->idle_match = 1;
2510                 ap->ability_match_cfg = 0;
2511                 ap->ability_match_count = 0;
2512                 ap->ability_match = 0;
2513                 ap->ack_match = 0;
2514
2515                 rx_cfg_reg = 0;
2516         }
2517
2518         ap->rxconfig = rx_cfg_reg;
2519         ret = ANEG_OK;
2520
2521         switch (ap->state) {
2522         case ANEG_STATE_UNKNOWN:
2523                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2524                         ap->state = ANEG_STATE_AN_ENABLE;
2525
2526                 /* fallthru */
2527         case ANEG_STATE_AN_ENABLE:
2528                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2529                 if (ap->flags & MR_AN_ENABLE) {
2530                         ap->link_time = 0;
2531                         ap->cur_time = 0;
2532                         ap->ability_match_cfg = 0;
2533                         ap->ability_match_count = 0;
2534                         ap->ability_match = 0;
2535                         ap->idle_match = 0;
2536                         ap->ack_match = 0;
2537
2538                         ap->state = ANEG_STATE_RESTART_INIT;
2539                 } else {
2540                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2541                 }
2542                 break;
2543
2544         case ANEG_STATE_RESTART_INIT:
2545                 ap->link_time = ap->cur_time;
2546                 ap->flags &= ~(MR_NP_LOADED);
2547                 ap->txconfig = 0;
2548                 tw32(MAC_TX_AUTO_NEG, 0);
2549                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2550                 tw32_f(MAC_MODE, tp->mac_mode);
2551                 udelay(40);
2552
2553                 ret = ANEG_TIMER_ENAB;
2554                 ap->state = ANEG_STATE_RESTART;
2555
2556                 /* fallthru */
2557         case ANEG_STATE_RESTART:
2558                 delta = ap->cur_time - ap->link_time;
2559                 if (delta > ANEG_STATE_SETTLE_TIME) {
2560                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2561                 } else {
2562                         ret = ANEG_TIMER_ENAB;
2563                 }
2564                 break;
2565
2566         case ANEG_STATE_DISABLE_LINK_OK:
2567                 ret = ANEG_DONE;
2568                 break;
2569
2570         case ANEG_STATE_ABILITY_DETECT_INIT:
2571                 ap->flags &= ~(MR_TOGGLE_TX);
2572                 ap->txconfig = ANEG_CFG_FD;
2573                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2574                 if (flowctrl & ADVERTISE_1000XPAUSE)
2575                         ap->txconfig |= ANEG_CFG_PS1;
2576                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2577                         ap->txconfig |= ANEG_CFG_PS2;
2578                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2579                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2580                 tw32_f(MAC_MODE, tp->mac_mode);
2581                 udelay(40);
2582
2583                 ap->state = ANEG_STATE_ABILITY_DETECT;
2584                 break;
2585
2586         case ANEG_STATE_ABILITY_DETECT:
2587                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2588                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2589                 }
2590                 break;
2591
2592         case ANEG_STATE_ACK_DETECT_INIT:
2593                 ap->txconfig |= ANEG_CFG_ACK;
2594                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2595                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2596                 tw32_f(MAC_MODE, tp->mac_mode);
2597                 udelay(40);
2598
2599                 ap->state = ANEG_STATE_ACK_DETECT;
2600
2601                 /* fallthru */
2602         case ANEG_STATE_ACK_DETECT:
2603                 if (ap->ack_match != 0) {
2604                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2605                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2606                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2607                         } else {
2608                                 ap->state = ANEG_STATE_AN_ENABLE;
2609                         }
2610                 } else if (ap->ability_match != 0 &&
2611                            ap->rxconfig == 0) {
2612                         ap->state = ANEG_STATE_AN_ENABLE;
2613                 }
2614                 break;
2615
2616         case ANEG_STATE_COMPLETE_ACK_INIT:
2617                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2618                         ret = ANEG_FAILED;
2619                         break;
2620                 }
2621                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2622                                MR_LP_ADV_HALF_DUPLEX |
2623                                MR_LP_ADV_SYM_PAUSE |
2624                                MR_LP_ADV_ASYM_PAUSE |
2625                                MR_LP_ADV_REMOTE_FAULT1 |
2626                                MR_LP_ADV_REMOTE_FAULT2 |
2627                                MR_LP_ADV_NEXT_PAGE |
2628                                MR_TOGGLE_RX |
2629                                MR_NP_RX);
2630                 if (ap->rxconfig & ANEG_CFG_FD)
2631                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2632                 if (ap->rxconfig & ANEG_CFG_HD)
2633                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2634                 if (ap->rxconfig & ANEG_CFG_PS1)
2635                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2636                 if (ap->rxconfig & ANEG_CFG_PS2)
2637                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2638                 if (ap->rxconfig & ANEG_CFG_RF1)
2639                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2640                 if (ap->rxconfig & ANEG_CFG_RF2)
2641                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2642                 if (ap->rxconfig & ANEG_CFG_NP)
2643                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2644
2645                 ap->link_time = ap->cur_time;
2646
2647                 ap->flags ^= (MR_TOGGLE_TX);
2648                 if (ap->rxconfig & 0x0008)
2649                         ap->flags |= MR_TOGGLE_RX;
2650                 if (ap->rxconfig & ANEG_CFG_NP)
2651                         ap->flags |= MR_NP_RX;
2652                 ap->flags |= MR_PAGE_RX;
2653
2654                 ap->state = ANEG_STATE_COMPLETE_ACK;
2655                 ret = ANEG_TIMER_ENAB;
2656                 break;
2657
2658         case ANEG_STATE_COMPLETE_ACK:
2659                 if (ap->ability_match != 0 &&
2660                     ap->rxconfig == 0) {
2661                         ap->state = ANEG_STATE_AN_ENABLE;
2662                         break;
2663                 }
2664                 delta = ap->cur_time - ap->link_time;
2665                 if (delta > ANEG_STATE_SETTLE_TIME) {
2666                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2667                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2668                         } else {
2669                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2670                                     !(ap->flags & MR_NP_RX)) {
2671                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2672                                 } else {
2673                                         ret = ANEG_FAILED;
2674                                 }
2675                         }
2676                 }
2677                 break;
2678
2679         case ANEG_STATE_IDLE_DETECT_INIT:
2680                 ap->link_time = ap->cur_time;
2681                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2682                 tw32_f(MAC_MODE, tp->mac_mode);
2683                 udelay(40);
2684
2685                 ap->state = ANEG_STATE_IDLE_DETECT;
2686                 ret = ANEG_TIMER_ENAB;
2687                 break;
2688
2689         case ANEG_STATE_IDLE_DETECT:
2690                 if (ap->ability_match != 0 &&
2691                     ap->rxconfig == 0) {
2692                         ap->state = ANEG_STATE_AN_ENABLE;
2693                         break;
2694                 }
2695                 delta = ap->cur_time - ap->link_time;
2696                 if (delta > ANEG_STATE_SETTLE_TIME) {
2697                         /* XXX another gem from the Broadcom driver :( */
2698                         ap->state = ANEG_STATE_LINK_OK;
2699                 }
2700                 break;
2701
2702         case ANEG_STATE_LINK_OK:
2703                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2704                 ret = ANEG_DONE;
2705                 break;
2706
2707         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2708                 /* ??? unimplemented */
2709                 break;
2710
2711         case ANEG_STATE_NEXT_PAGE_WAIT:
2712                 /* ??? unimplemented */
2713                 break;
2714
2715         default:
2716                 ret = ANEG_FAILED;
2717                 break;
2718         }
2719
2720         return ret;
2721 }
2722
2723 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2724 {
2725         int res = 0;
2726         struct tg3_fiber_aneginfo aninfo;
2727         int status = ANEG_FAILED;
2728         unsigned int tick;
2729         u32 tmp;
2730
2731         tw32_f(MAC_TX_AUTO_NEG, 0);
2732
2733         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2734         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2735         udelay(40);
2736
2737         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2738         udelay(40);
2739
2740         memset(&aninfo, 0, sizeof(aninfo));
2741         aninfo.flags |= MR_AN_ENABLE;
2742         aninfo.state = ANEG_STATE_UNKNOWN;
2743         aninfo.cur_time = 0;
2744         tick = 0;
2745         while (++tick < 195000) {
2746                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2747                 if (status == ANEG_DONE || status == ANEG_FAILED)
2748                         break;
2749
2750                 udelay(1);
2751         }
2752
2753         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2754         tw32_f(MAC_MODE, tp->mac_mode);
2755         udelay(40);
2756
2757         *txflags = aninfo.txconfig;
2758         *rxflags = aninfo.flags;
2759
2760         if (status == ANEG_DONE &&
2761             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2762                              MR_LP_ADV_FULL_DUPLEX)))
2763                 res = 1;
2764
2765         return res;
2766 }
2767
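/* One-time setup of the BCM8002 SerDes PHY through vendor-specific
 * registers: set the PLL lock range, soft-reset, enable auto-lock and
 * comdet, pulse POR, then deselect the channel register so the PHY ID
 * can be read later.  Skipped if the device is already initialized
 * and currently has no PCS sync.
 */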
2768 static void tg3_init_bcm8002(struct tg3 *tp)
2769 {
2770         u32 mac_status = tr32(MAC_STATUS);
2771         int i;
2772
2773         /* Reset when initializing for the first time or when we have a link. */
2774         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2775             !(mac_status & MAC_STATUS_PCS_SYNCED))
2776                 return;
2777
2778         /* Set PLL lock range. */
2779         tg3_writephy(tp, 0x16, 0x8007);
2780
2781         /* SW reset */
2782         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2783
2784         /* Wait for reset to complete. */
2785         /* XXX schedule_timeout() ... */
2786         for (i = 0; i < 500; i++)
2787                 udelay(10);
2788
2789         /* Config mode; select PMA/Ch 1 regs. */
2790         tg3_writephy(tp, 0x10, 0x8411);
2791
2792         /* Enable auto-lock and comdet, select txclk for tx. */
2793         tg3_writephy(tp, 0x11, 0x0a10);
2794
2795         tg3_writephy(tp, 0x18, 0x00a0);
2796         tg3_writephy(tp, 0x16, 0x41ff);
2797
2798         /* Assert and deassert POR. */
2799         tg3_writephy(tp, 0x13, 0x0400);
2800         udelay(40);
2801         tg3_writephy(tp, 0x13, 0x0000);
2802
2803         tg3_writephy(tp, 0x11, 0x0a50);
2804         udelay(40);
2805         tg3_writephy(tp, 0x11, 0x0a10);
2806
2807         /* Wait for signal to stabilize */
2808         /* XXX schedule_timeout() ... */
2809         for (i = 0; i < 15000; i++)
2810                 udelay(10);
2811
2812         /* Deselect the channel register so we can read the PHYID
2813          * later.
2814          */
2815         tg3_writephy(tp, 0x10, 0x8011);
2816 }
2817
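/* Fiber link setup using the hardware SG_DIG autoneg block.  Handles
 * forced mode, restarts autoneg whenever the expected control word
 * (including the advertised pause bits) changes, and falls back to
 * parallel detection when the partner sends no config code words.
 * Returns 1 if the link came up.
 */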
2818 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2819 {
2820         u16 flowctrl;
2821         u32 sg_dig_ctrl, sg_dig_status;
2822         u32 serdes_cfg, expected_sg_dig_ctrl;
2823         int workaround, port_a;
2824         int current_link_up;
2825
2826         serdes_cfg = 0;
2827         expected_sg_dig_ctrl = 0;
2828         workaround = 0;
2829         port_a = 1;
2830         current_link_up = 0;
2831
2832         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2833             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2834                 workaround = 1;
2835                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2836                         port_a = 0;
2837
2838                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2839                 /* preserve bits 20-23 for voltage regulator */
2840                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2841         }
2842
2843         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2844
2845         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2846                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2847                         if (workaround) {
2848                                 u32 val = serdes_cfg;
2849
2850                                 if (port_a)
2851                                         val |= 0xc010000;
2852                                 else
2853                                         val |= 0x4010000;
2854                                 tw32_f(MAC_SERDES_CFG, val);
2855                         }
2856
2857                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2858                 }
2859                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2860                         tg3_setup_flow_control(tp, 0, 0);
2861                         current_link_up = 1;
2862                 }
2863                 goto out;
2864         }
2865
2866         /* Want auto-negotiation.  */
2867         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2868
2869         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2870         if (flowctrl & ADVERTISE_1000XPAUSE)
2871                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2872         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2873                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2874
2875         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2876                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2877                     tp->serdes_counter &&
2878                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2879                                     MAC_STATUS_RCVD_CFG)) ==
2880                      MAC_STATUS_PCS_SYNCED)) {
2881                         tp->serdes_counter--;
2882                         current_link_up = 1;
2883                         goto out;
2884                 }
2885 restart_autoneg:
2886                 if (workaround)
2887                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2888                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2889                 udelay(5);
2890                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2891
2892                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2893                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2894         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2895                                  MAC_STATUS_SIGNAL_DET)) {
2896                 sg_dig_status = tr32(SG_DIG_STATUS);
2897                 mac_status = tr32(MAC_STATUS);
2898
2899                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2900                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2901                         u32 local_adv = 0, remote_adv = 0;
2902
2903                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2904                                 local_adv |= ADVERTISE_1000XPAUSE;
2905                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2906                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2907
2908                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2909                                 remote_adv |= LPA_1000XPAUSE;
2910                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2911                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2912
2913                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2914                         current_link_up = 1;
2915                         tp->serdes_counter = 0;
2916                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2917                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2918                         if (tp->serdes_counter)
2919                                 tp->serdes_counter--;
2920                         else {
2921                                 if (workaround) {
2922                                         u32 val = serdes_cfg;
2923
2924                                         if (port_a)
2925                                                 val |= 0xc010000;
2926                                         else
2927                                                 val |= 0x4010000;
2928
2929                                         tw32_f(MAC_SERDES_CFG, val);
2930                                 }
2931
2932                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2933                                 udelay(40);
2934
2935                                 /* Link parallel detection: link is up only
2936                                  * if we have PCS_SYNC and are not
2937                                  * receiving config code words. */
2938                                 mac_status = tr32(MAC_STATUS);
2939                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2940                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2941                                         tg3_setup_flow_control(tp, 0, 0);
2942                                         current_link_up = 1;
2943                                         tp->tg3_flags2 |=
2944                                                 TG3_FLG2_PARALLEL_DETECT;
2945                                         tp->serdes_counter =
2946                                                 SERDES_PARALLEL_DET_TIMEOUT;
2947                                 } else
2948                                         goto restart_autoneg;
2949                         }
2950                 }
2951         } else {
2952                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2953                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2954         }
2955
2956 out:
2957         return current_link_up;
2958 }
2959
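/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine (or simply force 1000-full when autoneg is off),
 * derive flow control from the exchanged config words, then clear the
 * latched SYNC/CFG-changed status bits.  Returns 1 if the link is up.
 */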
2960 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2961 {
2962         int current_link_up = 0;
2963
2964         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2965                 goto out;
2966
2967         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2968                 u32 txflags, rxflags;
2969                 int i;
2970
2971                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2972                         u32 local_adv = 0, remote_adv = 0;
2973
2974                         if (txflags & ANEG_CFG_PS1)
2975                                 local_adv |= ADVERTISE_1000XPAUSE;
2976                         if (txflags & ANEG_CFG_PS2)
2977                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2978
2979                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
2980                                 remote_adv |= LPA_1000XPAUSE;
2981                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2982                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2983
2984                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2985
2986                         current_link_up = 1;
2987                 }
2988                 for (i = 0; i < 30; i++) {
2989                         udelay(20);
2990                         tw32_f(MAC_STATUS,
2991                                (MAC_STATUS_SYNC_CHANGED |
2992                                 MAC_STATUS_CFG_CHANGED));
2993                         udelay(40);
2994                         if ((tr32(MAC_STATUS) &
2995                              (MAC_STATUS_SYNC_CHANGED |
2996                               MAC_STATUS_CFG_CHANGED)) == 0)
2997                                 break;
2998                 }
2999
3000                 mac_status = tr32(MAC_STATUS);
3001                 if (current_link_up == 0 &&
3002                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3003                     !(mac_status & MAC_STATUS_RCVD_CFG))
3004                         current_link_up = 1;
3005         } else {
3006                 tg3_setup_flow_control(tp, 0, 0);
3007
3008                 /* Forcing 1000FD link up. */
3009                 current_link_up = 1;
3010
3011                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3012                 udelay(40);
3013
3014                 tw32_f(MAC_MODE, tp->mac_mode);
3015                 udelay(40);
3016         }
3017
3018 out:
3019         return current_link_up;
3020 }
3021
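/* Top-level link setup for TBI (fiber) ports.  Bails out early when
 * the link is already up and healthy, otherwise puts the MAC in TBI
 * mode, runs hardware or software autoneg as appropriate, and then
 * updates the LED overrides, carrier state and link report.
 */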
3022 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3023 {
3024         u32 orig_pause_cfg;
3025         u16 orig_active_speed;
3026         u8 orig_active_duplex;
3027         u32 mac_status;
3028         int current_link_up;
3029         int i;
3030
3031         orig_pause_cfg = tp->link_config.active_flowctrl;
3032         orig_active_speed = tp->link_config.active_speed;
3033         orig_active_duplex = tp->link_config.active_duplex;
3034
3035         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3036             netif_carrier_ok(tp->dev) &&
3037             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3038                 mac_status = tr32(MAC_STATUS);
3039                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3040                                MAC_STATUS_SIGNAL_DET |
3041                                MAC_STATUS_CFG_CHANGED |
3042                                MAC_STATUS_RCVD_CFG);
3043                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3044                                    MAC_STATUS_SIGNAL_DET)) {
3045                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3046                                             MAC_STATUS_CFG_CHANGED));
3047                         return 0;
3048                 }
3049         }
3050
3051         tw32_f(MAC_TX_AUTO_NEG, 0);
3052
3053         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3054         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3055         tw32_f(MAC_MODE, tp->mac_mode);
3056         udelay(40);
3057
3058         if (tp->phy_id == PHY_ID_BCM8002)
3059                 tg3_init_bcm8002(tp);
3060
3061         /* Enable link change event even when serdes polling.  */
3062         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3063         udelay(40);
3064
3065         current_link_up = 0;
3066         mac_status = tr32(MAC_STATUS);
3067
3068         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3069                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3070         else
3071                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3072
3073         tp->hw_status->status =
3074                 (SD_STATUS_UPDATED |
3075                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3076
3077         for (i = 0; i < 100; i++) {
3078                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3079                                     MAC_STATUS_CFG_CHANGED));
3080                 udelay(5);
3081                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3082                                          MAC_STATUS_CFG_CHANGED |
3083                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3084                         break;
3085         }
3086
3087         mac_status = tr32(MAC_STATUS);
3088         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3089                 current_link_up = 0;
3090                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3091                     tp->serdes_counter == 0) {
3092                         tw32_f(MAC_MODE, (tp->mac_mode |
3093                                           MAC_MODE_SEND_CONFIGS));
3094                         udelay(1);
3095                         tw32_f(MAC_MODE, tp->mac_mode);
3096                 }
3097         }
3098
3099         if (current_link_up == 1) {
3100                 tp->link_config.active_speed = SPEED_1000;
3101                 tp->link_config.active_duplex = DUPLEX_FULL;
3102                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3103                                     LED_CTRL_LNKLED_OVERRIDE |
3104                                     LED_CTRL_1000MBPS_ON));
3105         } else {
3106                 tp->link_config.active_speed = SPEED_INVALID;
3107                 tp->link_config.active_duplex = DUPLEX_INVALID;
3108                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3109                                     LED_CTRL_LNKLED_OVERRIDE |
3110                                     LED_CTRL_TRAFFIC_OVERRIDE));
3111         }
3112
3113         if (current_link_up != netif_carrier_ok(tp->dev)) {
3114                 if (current_link_up)
3115                         netif_carrier_on(tp->dev);
3116                 else
3117                         netif_carrier_off(tp->dev);
3118                 tg3_link_report(tp);
3119         } else {
3120                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3121                 if (orig_pause_cfg != now_pause_cfg ||
3122                     orig_active_speed != tp->link_config.active_speed ||
3123                     orig_active_duplex != tp->link_config.active_duplex)
3124                         tg3_link_report(tp);
3125         }
3126
3127         return 0;
3128 }
3129
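/* Link setup for SerDes devices managed through MII registers (the
 * 5714S family).  Programs the 1000BASE-X advertisement and BMCR by
 * hand, tracks parallel detection, and derives duplex and flow
 * control from the negotiated ability bits.
 */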
3130 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3131 {
3132         int current_link_up, err = 0;
3133         u32 bmsr, bmcr;
3134         u16 current_speed;
3135         u8 current_duplex;
3136         u32 local_adv, remote_adv;
3137
3138         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3139         tw32_f(MAC_MODE, tp->mac_mode);
3140         udelay(40);
3141
3142         tw32(MAC_EVENT, 0);
3143
3144         tw32_f(MAC_STATUS,
3145              (MAC_STATUS_SYNC_CHANGED |
3146               MAC_STATUS_CFG_CHANGED |
3147               MAC_STATUS_MI_COMPLETION |
3148               MAC_STATUS_LNKSTATE_CHANGED));
3149         udelay(40);
3150
3151         if (force_reset)
3152                 tg3_phy_reset(tp);
3153
3154         current_link_up = 0;
3155         current_speed = SPEED_INVALID;
3156         current_duplex = DUPLEX_INVALID;
3157
3158         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3159         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3161                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3162                         bmsr |= BMSR_LSTATUS;
3163                 else
3164                         bmsr &= ~BMSR_LSTATUS;
3165         }
3166
3167         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3168
3169         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3170             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3171              tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3172                 /* do nothing, just check for link up at the end */
3173         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3174                 u32 adv, new_adv;
3175
3176                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3177                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3178                                   ADVERTISE_1000XPAUSE |
3179                                   ADVERTISE_1000XPSE_ASYM |
3180                                   ADVERTISE_SLCT);
3181
3182                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3183
3184                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3185                         new_adv |= ADVERTISE_1000XHALF;
3186                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3187                         new_adv |= ADVERTISE_1000XFULL;
3188
3189                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3190                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3191                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3192                         tg3_writephy(tp, MII_BMCR, bmcr);
3193
3194                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3195                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3196                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3197
3198                         return err;
3199                 }
3200         } else {
3201                 u32 new_bmcr;
3202
3203                 bmcr &= ~BMCR_SPEED1000;
3204                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3205
3206                 if (tp->link_config.duplex == DUPLEX_FULL)
3207                         new_bmcr |= BMCR_FULLDPLX;
3208
3209                 if (new_bmcr != bmcr) {
3210                         /* BMCR_SPEED1000 is a reserved bit that needs
3211                          * to be set on write.
3212                          */
3213                         new_bmcr |= BMCR_SPEED1000;
3214
3215                         /* Force a linkdown */
3216                         if (netif_carrier_ok(tp->dev)) {
3217                                 u32 adv;
3218
3219                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3220                                 adv &= ~(ADVERTISE_1000XFULL |
3221                                          ADVERTISE_1000XHALF |
3222                                          ADVERTISE_SLCT);
3223                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3224                                 tg3_writephy(tp, MII_BMCR, bmcr |
3225                                                            BMCR_ANRESTART |
3226                                                            BMCR_ANENABLE);
3227                                 udelay(10);
3228                                 netif_carrier_off(tp->dev);
3229                         }
3230                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3231                         bmcr = new_bmcr;
3232                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3233                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3234                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3235                             ASIC_REV_5714) {
3236                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3237                                         bmsr |= BMSR_LSTATUS;
3238                                 else
3239                                         bmsr &= ~BMSR_LSTATUS;
3240                         }
3241                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3242                 }
3243         }
3244
3245         if (bmsr & BMSR_LSTATUS) {
3246                 current_speed = SPEED_1000;
3247                 current_link_up = 1;
3248                 if (bmcr & BMCR_FULLDPLX)
3249                         current_duplex = DUPLEX_FULL;
3250                 else
3251                         current_duplex = DUPLEX_HALF;
3252
3253                 local_adv = 0;
3254                 remote_adv = 0;
3255
3256                 if (bmcr & BMCR_ANENABLE) {
3257                         u32 common;
3258
3259                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3260                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3261                         common = local_adv & remote_adv;
3262                         if (common & (ADVERTISE_1000XHALF |
3263                                       ADVERTISE_1000XFULL)) {
3264                                 if (common & ADVERTISE_1000XFULL)
3265                                         current_duplex = DUPLEX_FULL;
3266                                 else
3267                                         current_duplex = DUPLEX_HALF;
3268                         } else
3269                                 current_link_up = 0;
3271                 }
3272         }
3273
3274         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3275                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3276
3277         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3278         if (tp->link_config.active_duplex == DUPLEX_HALF)
3279                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3280
3281         tw32_f(MAC_MODE, tp->mac_mode);
3282         udelay(40);
3283
3284         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3285
3286         tp->link_config.active_speed = current_speed;
3287         tp->link_config.active_duplex = current_duplex;
3288
3289         if (current_link_up != netif_carrier_ok(tp->dev)) {
3290                 if (current_link_up)
3291                         netif_carrier_on(tp->dev);
3292                 else {
3293                         netif_carrier_off(tp->dev);
3294                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3295                 }
3296                 tg3_link_report(tp);
3297         }
3298         return err;
3299 }
3300
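/* Periodic helper for MII-managed SerDes ports.  Once the autoneg
 * countdown expires: if the link is down, probe the PHY shadow and
 * expansion registers for signal-detect without config code words and
 * force 1000-full (parallel detection); if the link came up that way
 * and config code words reappear, hand control back to autoneg.
 */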
3301 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3302 {
3303         if (tp->serdes_counter) {
3304                 /* Give autoneg time to complete. */
3305                 tp->serdes_counter--;
3306                 return;
3307         }
3308         if (!netif_carrier_ok(tp->dev) &&
3309             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3310                 u32 bmcr;
3311
3312                 tg3_readphy(tp, MII_BMCR, &bmcr);
3313                 if (bmcr & BMCR_ANENABLE) {
3314                         u32 phy1, phy2;
3315
3316                         /* Select shadow register 0x1f */
3317                         tg3_writephy(tp, 0x1c, 0x7c00);
3318                         tg3_readphy(tp, 0x1c, &phy1);
3319
3320                         /* Select expansion interrupt status register */
3321                         tg3_writephy(tp, 0x17, 0x0f01);
3322                         tg3_readphy(tp, 0x15, &phy2);
3323                         tg3_readphy(tp, 0x15, &phy2);
3324
3325                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3326                                 /* We have signal detect and not receiving
3327                                  * config code words, link is up by parallel
3328                                  * detection.
3329                                  */
3330
3331                                 bmcr &= ~BMCR_ANENABLE;
3332                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3333                                 tg3_writephy(tp, MII_BMCR, bmcr);
3334                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3335                         }
3336                 }
3337         } else if (netif_carrier_ok(tp->dev) &&
3339                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3340                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3341                 u32 phy2;
3342
3343                 /* Select expansion interrupt status register */
3344                 tg3_writephy(tp, 0x17, 0x0f01);
3345                 tg3_readphy(tp, 0x15, &phy2);
3346                 if (phy2 & 0x20) {
3347                         u32 bmcr;
3348
3349                         /* Config code words received, turn on autoneg. */
3350                         tg3_readphy(tp, MII_BMCR, &bmcr);
3351                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3352
3353                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3354
3355                 }
3356         }
3357 }
3358
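/* Dispatch link setup to the fiber, MII-SerDes or copper handler,
 * then apply the post-link fixups: the 5784 A0/A1 core clock
 * prescaler, the TX IPG/slot-time values for 1000-half, statistics
 * coalescing ticks, and the ASPM L1 entry threshold workaround.
 */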
3359 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3360 {
3361         int err;
3362
3363         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3364                 err = tg3_setup_fiber_phy(tp, force_reset);
3365         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3366                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3367         } else {
3368                 err = tg3_setup_copper_phy(tp, force_reset);
3369         }
3370
3371         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3372             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3373                 u32 val, scale;
3374
3375                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3376                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3377                         scale = 65;
3378                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3379                         scale = 6;
3380                 else
3381                         scale = 12;
3382
3383                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3384                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3385                 tw32(GRC_MISC_CFG, val);
3386         }
3387
3388         if (tp->link_config.active_speed == SPEED_1000 &&
3389             tp->link_config.active_duplex == DUPLEX_HALF)
3390                 tw32(MAC_TX_LENGTHS,
3391                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3392                       (6 << TX_LENGTHS_IPG_SHIFT) |
3393                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3394         else
3395                 tw32(MAC_TX_LENGTHS,
3396                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3397                       (6 << TX_LENGTHS_IPG_SHIFT) |
3398                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3399
3400         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3401                 if (netif_carrier_ok(tp->dev)) {
3402                         tw32(HOSTCC_STAT_COAL_TICKS,
3403                              tp->coal.stats_block_coalesce_usecs);
3404                 } else {
3405                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3406                 }
3407         }
3408
3409         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3410                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3411                 if (!netif_carrier_ok(tp->dev))
3412                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3413                               tp->pwrmgmt_thresh;
3414                 else
3415                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3416                 tw32(PCIE_PWR_MGMT_THRESH, val);
3417         }
3418
3419         return err;
3420 }
3421
3422 /* This is called whenever we suspect that the system chipset is re-
3423  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3424  * is bogus tx completions. We try to recover by setting the
3425  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3426  * in the workqueue.
3427  */
3428 static void tg3_tx_recover(struct tg3 *tp)
3429 {
3430         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3431                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3432
3433         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3434                "mapped I/O cycles to the network device, attempting to "
3435                "recover. Please report the problem to the driver maintainer "
3436                "and include system chipset information.\n", tp->dev->name);
3437
3438         spin_lock(&tp->lock);
3439         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3440         spin_unlock(&tp->lock);
3441 }
3442
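/* Number of free TX descriptors.  The indices wrap at the power-of-two
 * ring size, so the in-flight count is (prod - cons) & (size - 1);
 * e.g. prod = 5 and cons = 510 on a 512-entry ring gives 7 in flight.
 * The barrier pairs with the smp_mb() in tg3_tx() (see below).
 */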
3443 static inline u32 tg3_tx_avail(struct tg3 *tp)
3444 {
3445         smp_mb();
3446         return (tp->tx_pending -
3447                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3448 }
3449
3450 /* Tigon3 never reports partial packet sends.  So we do not
3451  * need special logic to handle SKBs that have not had all
3452  * of their frags sent yet, like SunGEM does.
3453  */
3454 static void tg3_tx(struct tg3 *tp)
3455 {
3456         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3457         u32 sw_idx = tp->tx_cons;
3458
3459         while (sw_idx != hw_idx) {
3460                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3461                 struct sk_buff *skb = ri->skb;
3462                 int i, tx_bug = 0;
3463
3464                 if (unlikely(skb == NULL)) {
3465                         tg3_tx_recover(tp);
3466                         return;
3467                 }
3468
3469                 pci_unmap_single(tp->pdev,
3470                                  pci_unmap_addr(ri, mapping),
3471                                  skb_headlen(skb),
3472                                  PCI_DMA_TODEVICE);
3473
3474                 ri->skb = NULL;
3475
3476                 sw_idx = NEXT_TX(sw_idx);
3477
3478                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3479                         ri = &tp->tx_buffers[sw_idx];
3480                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3481                                 tx_bug = 1;
3482
3483                         pci_unmap_page(tp->pdev,
3484                                        pci_unmap_addr(ri, mapping),
3485                                        skb_shinfo(skb)->frags[i].size,
3486                                        PCI_DMA_TODEVICE);
3487
3488                         sw_idx = NEXT_TX(sw_idx);
3489                 }
3490
3491                 dev_kfree_skb(skb);
3492
3493                 if (unlikely(tx_bug)) {
3494                         tg3_tx_recover(tp);
3495                         return;
3496                 }
3497         }
3498
3499         tp->tx_cons = sw_idx;
3500
3501         /* Need to make the tx_cons update visible to tg3_start_xmit()
3502          * before checking for netif_queue_stopped().  Without the
3503          * memory barrier, there is a small possibility that tg3_start_xmit()
3504          * will miss it and cause the queue to be stopped forever.
3505          */
3506         smp_mb();
3507
3508         if (unlikely(netif_queue_stopped(tp->dev) &&
3509                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3510                 netif_tx_lock(tp->dev);
3511                 if (netif_queue_stopped(tp->dev) &&
3512                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3513                         netif_wake_queue(tp->dev);
3514                 netif_tx_unlock(tp->dev);
3515         }
3516 }
3517
3518 /* Returns size of skb allocated or < 0 on error.
3519  *
3520  * We only need to fill in the address because the other members
3521  * of the RX descriptor are invariant, see tg3_init_rings.
3522  *
3523  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3524  * posting buffers we only dirty the first cache line of the RX
3525  * descriptor (containing the address).  Whereas for the RX status
3526  * buffers the cpu only reads the last cacheline of the RX descriptor
3527  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3528  */
3529 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3530                             int src_idx, u32 dest_idx_unmasked)
3531 {
3532         struct tg3_rx_buffer_desc *desc;
3533         struct ring_info *map, *src_map;
3534         struct sk_buff *skb;
3535         dma_addr_t mapping;
3536         int skb_size, dest_idx;
3537
3538         src_map = NULL;
3539         switch (opaque_key) {
3540         case RXD_OPAQUE_RING_STD:
3541                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3542                 desc = &tp->rx_std[dest_idx];
3543                 map = &tp->rx_std_buffers[dest_idx];
3544                 if (src_idx >= 0)
3545                         src_map = &tp->rx_std_buffers[src_idx];
3546                 skb_size = tp->rx_pkt_buf_sz;
3547                 break;
3548
3549         case RXD_OPAQUE_RING_JUMBO:
3550                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3551                 desc = &tp->rx_jumbo[dest_idx];
3552                 map = &tp->rx_jumbo_buffers[dest_idx];
3553                 if (src_idx >= 0)
3554                         src_map = &tp->rx_jumbo_buffers[src_idx];
3555                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3556                 break;
3557
3558         default:
3559                 return -EINVAL;
3560         }
3561
3562         /* Do not overwrite any of the map or rp information
3563          * until we are sure we can commit to a new buffer.
3564          *
3565          * Callers depend upon this behavior and assume that
3566          * we leave everything unchanged if we fail.
3567          */
3568         skb = netdev_alloc_skb(tp->dev, skb_size);
3569         if (skb == NULL)
3570                 return -ENOMEM;
3571
3572         skb_reserve(skb, tp->rx_offset);
3573
3574         mapping = pci_map_single(tp->pdev, skb->data,
3575                                  skb_size - tp->rx_offset,
3576                                  PCI_DMA_FROMDEVICE);
3577
3578         map->skb = skb;
3579         pci_unmap_addr_set(map, mapping, mapping);
3580
3581         if (src_map != NULL)
3582                 src_map->skb = NULL;
3583
3584         desc->addr_hi = ((u64)mapping >> 32);
3585         desc->addr_lo = ((u64)mapping & 0xffffffff);
3586
3587         return skb_size;
3588 }
3589
3590 /* We only need to copy over the address because the other
3591  * members of the RX descriptor are invariant.  See notes above
3592  * tg3_alloc_rx_skb for full details.
3593  */
3594 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3595                            int src_idx, u32 dest_idx_unmasked)
3596 {
3597         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3598         struct ring_info *src_map, *dest_map;
3599         int dest_idx;
3600
3601         switch (opaque_key) {
3602         case RXD_OPAQUE_RING_STD:
3603                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3604                 dest_desc = &tp->rx_std[dest_idx];
3605                 dest_map = &tp->rx_std_buffers[dest_idx];
3606                 src_desc = &tp->rx_std[src_idx];
3607                 src_map = &tp->rx_std_buffers[src_idx];
3608                 break;
3609
3610         case RXD_OPAQUE_RING_JUMBO:
3611                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3612                 dest_desc = &tp->rx_jumbo[dest_idx];
3613                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3614                 src_desc = &tp->rx_jumbo[src_idx];
3615                 src_map = &tp->rx_jumbo_buffers[src_idx];
3616                 break;
3617
3618         default:
3619                 return;
3620         }
3621
3622         dest_map->skb = src_map->skb;
3623         pci_unmap_addr_set(dest_map, mapping,
3624                            pci_unmap_addr(src_map, mapping));
3625         dest_desc->addr_hi = src_desc->addr_hi;
3626         dest_desc->addr_lo = src_desc->addr_lo;
3627
3628         src_map->skb = NULL;
3629 }
3630
3631 #if TG3_VLAN_TAG_USED
3632 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3633 {
3634         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3635 }
3636 #endif
3637
3638 /* The RX ring scheme is composed of multiple rings which post fresh
3639  * buffers to the chip, and one special ring the chip uses to report
3640  * status back to the host.
3641  *
3642  * The special ring reports the status of received packets to the
3643  * host.  The chip does not write into the original descriptor the
3644  * RX buffer was obtained from.  The chip simply takes the original
3645  * descriptor as provided by the host, updates the status and length
3646  * field, then writes this into the next status ring entry.
3647  *
3648  * Each ring the host uses to post buffers to the chip is described
3649  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3650  * it is first placed into the on-chip ram.  When the packet's length
3651  * is known, it walks down the TG3_BDINFO entries to select the ring.
3652  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3653  * which is within the range of the new packet's length is chosen.
3654  *
3655  * The "separate ring for rx status" scheme may sound odd, but it makes
3656  * sense from a cache coherency perspective.  If only the host writes
3657  * to the buffer post rings, and only the chip writes to the rx status
3658  * rings, then cache lines never move beyond shared-modified state.
3659  * If both the host and chip were to write into the same ring, cache line
3660  * eviction could occur since both entities want it in an exclusive state.
3661  */
3662 static int tg3_rx(struct tg3 *tp, int budget)
3663 {
3664         u32 work_mask, rx_std_posted = 0;
3665         u32 sw_idx = tp->rx_rcb_ptr;
3666         u16 hw_idx;
3667         int received;
3668
3669         hw_idx = tp->hw_status->idx[0].rx_producer;
3670         /*
3671          * We need to order the read of hw_idx and the read of
3672          * the opaque cookie.
3673          */
3674         rmb();
3675         work_mask = 0;
3676         received = 0;
3677         while (sw_idx != hw_idx && budget > 0) {
3678                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3679                 unsigned int len;
3680                 struct sk_buff *skb;
3681                 dma_addr_t dma_addr;
3682                 u32 opaque_key, desc_idx, *post_ptr;
3683
3684                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3685                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3686                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3687                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3688                                                   mapping);
3689                         skb = tp->rx_std_buffers[desc_idx].skb;
3690                         post_ptr = &tp->rx_std_ptr;
3691                         rx_std_posted++;
3692                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3693                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3694                                                   mapping);
3695                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3696                         post_ptr = &tp->rx_jumbo_ptr;
3697                 } else {
3699                         goto next_pkt_nopost;
3700                 }
3701
3702                 work_mask |= opaque_key;
3703
3704                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3705                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3706                 drop_it:
3707                         tg3_recycle_rx(tp, opaque_key,
3708                                        desc_idx, *post_ptr);
3709                 drop_it_no_recycle:
3710                         /* Other statistics are tracked by the card. */
3711                         tp->net_stats.rx_dropped++;
3712                         goto next_pkt;
3713                 }
3714
3715                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3716
3717                 /* rx_offset != 2 iff this is a 5701 card running
3718                  * in PCI-X mode [see tg3_get_invariants()]
3719                  */
3720                 if (len > RX_COPY_THRESHOLD && tp->rx_offset == 2) {
3722                         int skb_size;
3723
3724                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3725                                                     desc_idx, *post_ptr);
3726                         if (skb_size < 0)
3727                                 goto drop_it;
3728
3729                         pci_unmap_single(tp->pdev, dma_addr,
3730                                          skb_size - tp->rx_offset,
3731                                          PCI_DMA_FROMDEVICE);
3732
3733                         skb_put(skb, len);
3734                 } else {
3735                         struct sk_buff *copy_skb;
3736
3737                         tg3_recycle_rx(tp, opaque_key,
3738                                        desc_idx, *post_ptr);
3739
3740                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3741                         if (copy_skb == NULL)
3742                                 goto drop_it_no_recycle;
3743
3744                         skb_reserve(copy_skb, 2);
3745                         skb_put(copy_skb, len);
3746                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3747                         skb_copy_from_linear_data(skb, copy_skb->data, len);
3748                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3749
3750                         /* We'll reuse the original ring buffer. */
3751                         skb = copy_skb;
3752                 }
3753
3754                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3755                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3756                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3757                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3758                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3759                 else
3760                         skb->ip_summed = CHECKSUM_NONE;
3761
3762                 skb->protocol = eth_type_trans(skb, tp->dev);
3763 #if TG3_VLAN_TAG_USED
3764                 if (tp->vlgrp != NULL &&
3765                     desc->type_flags & RXD_FLAG_VLAN) {
3766                         tg3_vlan_rx(tp, skb,
3767                                     desc->err_vlan & RXD_VLAN_MASK);
3768                 } else
3769 #endif
3770                         netif_receive_skb(skb);
3771
3772                 tp->dev->last_rx = jiffies;
3773                 received++;
3774                 budget--;
3775
3776 next_pkt:
3777                 (*post_ptr)++;
3778
3779                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3780                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3781
3782                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3783                                      TG3_64BIT_REG_LOW, idx);
3784                         work_mask &= ~RXD_OPAQUE_RING_STD;
3785                         rx_std_posted = 0;
3786                 }
3787 next_pkt_nopost:
3788                 sw_idx++;
3789                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3790
3791                 /* Refresh hw_idx to see if there is new work */
3792                 if (sw_idx == hw_idx) {
3793                         hw_idx = tp->hw_status->idx[0].rx_producer;
3794                         rmb();
3795                 }
3796         }
3797
3798         /* ACK the status ring. */
3799         tp->rx_rcb_ptr = sw_idx;
3800         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3801
3802         /* Refill RX ring(s). */
3803         if (work_mask & RXD_OPAQUE_RING_STD) {
3804                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3805                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3806                              sw_idx);
3807         }
3808         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3809                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3810                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3811                              sw_idx);
3812         }
3813         mmiowb();
3814
3815         return received;
3816 }
3817
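/* One pass of the NAPI poll loop: service a latched link-change event
 * (just ack the MAC status bits when phylib owns the PHY, otherwise
 * rerun tg3_setup_phy()), reap completed TX descriptors, then receive
 * packets up to the remaining budget.
 */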
3818 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3819 {
3820         struct tg3_hw_status *sblk = tp->hw_status;
3821
3822         /* handle link change and other phy events */
3823         if (!(tp->tg3_flags &
3824               (TG3_FLAG_USE_LINKCHG_REG |
3825                TG3_FLAG_POLL_SERDES))) {
3826                 if (sblk->status & SD_STATUS_LINK_CHG) {
3827                         sblk->status = SD_STATUS_UPDATED |
3828                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3829                         spin_lock(&tp->lock);
3830                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
3831                                 tw32_f(MAC_STATUS,
3832                                      (MAC_STATUS_SYNC_CHANGED |
3833                                       MAC_STATUS_CFG_CHANGED |
3834                                       MAC_STATUS_MI_COMPLETION |
3835                                       MAC_STATUS_LNKSTATE_CHANGED));
3836                                 udelay(40);
3837                         } else
3838                                 tg3_setup_phy(tp, 0);
3839                         spin_unlock(&tp->lock);
3840                 }
3841         }
3842
3843         /* run TX completion thread */
3844         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3845                 tg3_tx(tp);
3846                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3847                         return work_done;
3848         }
3849
3850         /* run RX thread, within the bounds set by NAPI.
3851          * All RX "locking" is done by ensuring outside
3852          * code synchronizes with tg3->napi.poll()
3853          */
3854         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3855                 work_done += tg3_rx(tp, budget - work_done);
3856
3857         return work_done;
3858 }
3859
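/* NAPI poll entry point.  Loops over tg3_poll_work() until the budget
 * is exhausted or no work remains, recording last_tag when tagged
 * status is in use, and re-enables chip interrupts via
 * tg3_restart_ints() once done.
 */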
3860 static int tg3_poll(struct napi_struct *napi, int budget)
3861 {
3862         struct tg3 *tp = container_of(napi, struct tg3, napi);
3863         int work_done = 0;
3864         struct tg3_hw_status *sblk = tp->hw_status;
3865
3866         while (1) {
3867                 work_done = tg3_poll_work(tp, work_done, budget);
3868
3869                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3870                         goto tx_recovery;
3871
3872                 if (unlikely(work_done >= budget))
3873                         break;
3874
3875                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3876                         /* tp->last_tag is used in tg3_restart_ints() below
3877                          * to tell the hw how much work has been processed,
3878                          * so we must read it before checking for more work.
3879                          */
3880                         tp->last_tag = sblk->status_tag;
3881                         rmb();
3882                 } else
3883                         sblk->status &= ~SD_STATUS_UPDATED;
3884
3885                 if (likely(!tg3_has_work(tp))) {
3886                         netif_rx_complete(tp->dev, napi);
3887                         tg3_restart_ints(tp);
3888                         break;
3889                 }
3890         }
3891
3892         return work_done;
3893
3894 tx_recovery:
3895         /* work_done is guaranteed to be less than budget. */
3896         netif_rx_complete(tp->dev, napi);
3897         schedule_work(&tp->reset_task);
3898         return work_done;
3899 }
3900
3901 static void tg3_irq_quiesce(struct tg3 *tp)
3902 {
3903         BUG_ON(tp->irq_sync);
3904
3905         tp->irq_sync = 1;
3906         smp_mb();
3907
3908         synchronize_irq(tp->pdev->irq);
3909 }
3910
3911 static inline int tg3_irq_sync(struct tg3 *tp)
3912 {
3913         return tp->irq_sync;
3914 }
3915
3916 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3917  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3918  * with as well.  Most of the time, this is not necessary except when
3919  * shutting down the device.
3920  */
3921 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3922 {
3923         spin_lock_bh(&tp->lock);
3924         if (irq_sync)
3925                 tg3_irq_quiesce(tp);
3926 }
3927
3928 static inline void tg3_full_unlock(struct tg3 *tp)
3929 {
3930         spin_unlock_bh(&tp->lock);
3931 }
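
/* A typical caller (sketch only) quiesces the IRQ handler before a
 * reconfiguration and releases the lock afterwards:
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram the hardware ...
 *	tg3_full_unlock(tp);
 */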
3932
3933 /* One-shot MSI handler - the chip automatically disables the
3934  * interrupt after sending the MSI, so the driver doesn't have to.
3935  */
3936 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3937 {
3938         struct net_device *dev = dev_id;
3939         struct tg3 *tp = netdev_priv(dev);
3940
3941         prefetch(tp->hw_status);
3942         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3943
3944         if (likely(!tg3_irq_sync(tp)))
3945                 netif_rx_schedule(dev, &tp->napi);
3946
3947         return IRQ_HANDLED;
3948 }
3949
3950 /* MSI ISR - No need to check for interrupt sharing and no need to
3951  * flush status block and interrupt mailbox. PCI ordering rules
3952  * guarantee that MSI will arrive after the status block.
3953  */
3954 static irqreturn_t tg3_msi(int irq, void *dev_id)
3955 {
3956         struct net_device *dev = dev_id;
3957         struct tg3 *tp = netdev_priv(dev);
3958
3959         prefetch(tp->hw_status);
3960         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3961         /*
3962          * Writing any value to intr-mbox-0 clears PCI INTA# and
3963          * chip-internal interrupt pending events.
3964          * Writing non-zero to intr-mbox-0 additionally tells the
3965          * NIC to stop sending us irqs, engaging "in-intr-handler"
3966          * event coalescing.
3967          */
3968         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3969         if (likely(!tg3_irq_sync(tp)))
3970                 netif_rx_schedule(dev, &tp->napi);
3971
3972         return IRQ_RETVAL(1);
3973 }
3974
3975 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3976 {
3977         struct net_device *dev = dev_id;
3978         struct tg3 *tp = netdev_priv(dev);
3979         struct tg3_hw_status *sblk = tp->hw_status;
3980         unsigned int handled = 1;
3981
3982         /* In INTx mode, it is possible for the interrupt to arrive at
3983          * the CPU before the status block write that preceded it lands.
3984          * Reading the PCI State register will confirm whether the
3985          * interrupt is ours and will flush the status block.
3986          */
3987         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3988                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3989                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3990                         handled = 0;
3991                         goto out;
3992                 }
3993         }
3994
3995         /*
3996          * Writing any value to intr-mbox-0 clears PCI INTA# and
3997          * chip-internal interrupt pending events.
3998          * Writing non-zero to intr-mbox-0 additionally tells the
3999          * NIC to stop sending us irqs, engaging "in-intr-handler"
4000          * event coalescing.
4001          *
4002          * Flush the mailbox to de-assert the IRQ immediately to prevent
4003          * spurious interrupts.  The flush impacts performance but
4004          * excessive spurious interrupts can be worse in some cases.
4005          */
4006         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4007         if (tg3_irq_sync(tp))
4008                 goto out;
4009         sblk->status &= ~SD_STATUS_UPDATED;
4010         if (likely(tg3_has_work(tp))) {
4011                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4012                 netif_rx_schedule(dev, &tp->napi);
4013         } else {
4014                 /* No work, shared interrupt perhaps?  re-enable
4015                  * interrupts, and flush that PCI write
4016                  */
4017                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4018                                0x00000000);
4019         }
4020 out:
4021         return IRQ_RETVAL(handled);
4022 }
4023
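/* INTx handler for chips using tagged status blocks.  A status_tag
 * equal to last_tag means no new status has been posted, so the
 * interrupt may not be ours; the PCI state register check settles it.
 * Otherwise ack the mailbox and, if NAPI is not already scheduled,
 * record the new tag and schedule the poll.
 */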
4024 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4025 {
4026         struct net_device *dev = dev_id;
4027         struct tg3 *tp = netdev_priv(dev);
4028         struct tg3_hw_status *sblk = tp->hw_status;
4029         unsigned int handled = 1;
4030
4031         /* In INTx mode, it is possible for the interrupt to arrive at
4032          * the CPU before the status block write that preceded it lands.
4033          * Reading the PCI State register will confirm whether the
4034          * interrupt is ours and will flush the status block.
4035          */
4036         if (unlikely(sblk->status_tag == tp->last_tag)) {
4037                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4038                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4039                         handled = 0;
4040                         goto out;
4041                 }
4042         }
4043
4044         /*
4045          * Writing any value to intr-mbox-0 clears PCI INTA# and
4046          * chip-internal interrupt pending events.
4047          * Writing non-zero to intr-mbox-0 additionally tells the
4048          * NIC to stop sending us irqs, engaging "in-intr-handler"
4049          * event coalescing.
4050          *
4051          * Flush the mailbox to de-assert the IRQ immediately to prevent
4052          * spurious interrupts.  The flush impacts performance but
4053          * excessive spurious interrupts can be worse in some cases.
4054          */
4055         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4056         if (tg3_irq_sync(tp))
4057                 goto out;
4058         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4059                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4060                 /* Update last_tag to mark that this status has been
4061                  * seen. Because interrupt may be shared, we may be
4062                  * racing with tg3_poll(), so only update last_tag
4063                  * if tg3_poll() is not scheduled.
4064                  */
4065                 tp->last_tag = sblk->status_tag;
4066                 __netif_rx_schedule(dev, &tp->napi);
4067         }
4068 out:
4069         return IRQ_RETVAL(handled);
4070 }
4071
4072 /* ISR for interrupt test */
4073 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4074 {
4075         struct net_device *dev = dev_id;
4076         struct tg3 *tp = netdev_priv(dev);
4077         struct tg3_hw_status *sblk = tp->hw_status;
4078
4079         if ((sblk->status & SD_STATUS_UPDATED) ||
4080             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4081                 tg3_disable_ints(tp);
4082                 return IRQ_RETVAL(1);
4083         }
4084         return IRQ_RETVAL(0);
4085 }
4086
4087 static int tg3_init_hw(struct tg3 *, int);
4088 static int tg3_halt(struct tg3 *, int, int);
4089
4090 /* Restart hardware after configuration changes, self-test, etc.
4091  * Invoked with tp->lock held.
4092  */
4093 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4094         __releases(tp->lock)
4095         __acquires(tp->lock)
4096 {
4097         int err;
4098
4099         err = tg3_init_hw(tp, reset_phy);
4100         if (err) {
4101                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4102                        "aborting.\n", tp->dev->name);
4103                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
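                /* dev_close() re-enters the driver and takes tp->lock
                 * itself, so the lock must be dropped around the call
                 * (hence the __releases/__acquires annotations above).
                 */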
4104                 tg3_full_unlock(tp);
4105                 del_timer_sync(&tp->timer);
4106                 tp->irq_sync = 0;
4107                 napi_enable(&tp->napi);
4108                 dev_close(tp->dev);
4109                 tg3_full_lock(tp, 0);
4110         }
4111         return err;
4112 }
4113
4114 #ifdef CONFIG_NET_POLL_CONTROLLER
4115 static void tg3_poll_controller(struct net_device *dev)
4116 {
4117         struct tg3 *tp = netdev_priv(dev);
4118
4119         tg3_interrupt(tp->pdev->irq, dev);
4120 }
4121 #endif
4122
4123 static void tg3_reset_task(struct work_struct *work)
4124 {
4125         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4126         unsigned int restart_timer;
4127
4128         tg3_full_lock(tp, 0);
4129
4130         if (!netif_running(tp->dev)) {
4131                 tg3_full_unlock(tp);
4132                 return;
4133         }
4134
4135         tg3_full_unlock(tp);
4136
4137         tg3_netif_stop(tp);
4138
4139         tg3_full_lock(tp, 1);
4140
4141         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4142         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4143
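        /* A TX hang may have been caused by posted mailbox writes
         * getting reordered; fall back to the flushing write handlers
         * before bringing the chip back up.
         */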
4144         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4145                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4146                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4147                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4148                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4149         }
4150
4151         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4152         if (tg3_init_hw(tp, 1))
4153                 goto out;
4154
4155         tg3_netif_start(tp);
4156
4157         if (restart_timer)
4158                 mod_timer(&tp->timer, jiffies + 1);
4159
4160 out:
4161         tg3_full_unlock(tp);
4162 }
4163
4164 static void tg3_dump_short_state(struct tg3 *tp)
4165 {
4166         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4167                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4168         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4169                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4170 }
4171
4172 static void tg3_tx_timeout(struct net_device *dev)
4173 {
4174         struct tg3 *tp = netdev_priv(dev);
4175
4176         if (netif_msg_tx_err(tp)) {
4177                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4178                        dev->name);
4179                 tg3_dump_short_state(tp);
4180         }
4181
4182         schedule_work(&tp->reset_task);
4183 }
4184
4185 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
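/* The test works on the low 32 bits of the bus address: 0xffffdcc0
 * leaves roughly 9K (a maximal jumbo frame) below the boundary, so
 * smaller bases can never wrap, and the overflow check
 * (base + len + 8 < base) catches any buffer that actually would.
 */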
4186 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4187 {
4188         u32 base = (u32) mapping & 0xffffffff;
4189
4190         return ((base > 0xffffdcc0) &&
4191                 (base + len + 8 < base));
4192 }
4193
4194 /* Test for DMA addresses > 40-bit */
4195 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4196                                           int len)
4197 {
4198 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4199         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4200                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4201         return 0;
4202 #else
4203         return 0;
4204 #endif
4205 }
4206
4207 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4208
4209 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4210 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4211                                        u32 last_plus_one, u32 *start,
4212                                        u32 base_flags, u32 mss)
4213 {
4214         struct sk_buff *new_skb;
4215         dma_addr_t new_addr = 0;
4216         u32 entry = *start;
4217         int i, ret = 0;
4218
4219         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4220                 new_skb = skb_copy(skb, GFP_ATOMIC);
4221         else {
4222                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4223
4224                 new_skb = skb_copy_expand(skb,
4225                                           skb_headroom(skb) + more_headroom,
4226                                           skb_tailroom(skb), GFP_ATOMIC);
4227         }
4228
4229         if (!new_skb) {
4230                 ret = -1;
4231         } else {
4232                 /* New SKB is guaranteed to be linear. */
4233                 entry = *start;
4234                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4235                                           PCI_DMA_TODEVICE);
4236                 /* Make sure new skb does not cross any 4G boundaries.
4237                  * Drop the packet if it does.
4238                  */
4239                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4240                         ret = -1;
4241                         dev_kfree_skb(new_skb);
4242                         new_skb = NULL;
4243                 } else {
4244                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4245                                     base_flags, 1 | (mss << 1));
4246                         *start = NEXT_TX(entry);
4247                 }
4248         }
4249
4250         /* Now clean up the sw ring entries. */
4251         i = 0;
4252         while (entry != last_plus_one) {
4253                 int len;
4254
4255                 if (i == 0)
4256                         len = skb_headlen(skb);
4257                 else
4258                         len = skb_shinfo(skb)->frags[i-1].size;
4259                 pci_unmap_single(tp->pdev,
4260                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4261                                  len, PCI_DMA_TODEVICE);
4262                 if (i == 0) {
4263                         tp->tx_buffers[entry].skb = new_skb;
4264                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4265                 } else {
4266                         tp->tx_buffers[entry].skb = NULL;
4267                 }
4268                 entry = NEXT_TX(entry);
4269                 i++;
4270         }
4271
4272         dev_kfree_skb(skb);
4273
4274         return ret;
4275 }
4276
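/* Fill in one hardware TX descriptor.  mss_and_is_end packs two
 * values: bit 0 marks the final fragment of the skb (TXD_FLAG_END)
 * and the remaining bits carry the TSO MSS, i.e. callers pass
 * (is_end | (mss << 1)).  When TXD_FLAG_VLAN is set, the caller has
 * stashed the VLAN tag in the top 16 bits of flags.
 */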
4277 static void tg3_set_txd(struct tg3 *tp, int entry,
4278                         dma_addr_t mapping, int len, u32 flags,
4279                         u32 mss_and_is_end)
4280 {
4281         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4282         int is_end = (mss_and_is_end & 0x1);
4283         u32 mss = (mss_and_is_end >> 1);
4284         u32 vlan_tag = 0;
4285
4286         if (is_end)
4287                 flags |= TXD_FLAG_END;
4288         if (flags & TXD_FLAG_VLAN) {
4289                 vlan_tag = flags >> 16;
4290                 flags &= 0xffff;
4291         }
4292         vlan_tag |= (mss << TXD_MSS_SHIFT);
4293
4294         txd->addr_hi = ((u64) mapping >> 32);
4295         txd->addr_lo = ((u64) mapping & 0xffffffff);
4296         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4297         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4298 }
4299
4300 /* hard_start_xmit for devices that don't have any bugs and
4301  * support TG3_FLG2_HW_TSO_2 only.
4302  */
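/* Because no DMA errata apply here, this path can skip the
 * 4GB-boundary and 40-bit address checks that
 * tg3_start_xmit_dma_bug() must perform on every fragment.
 */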
4303 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4304 {
4305         struct tg3 *tp = netdev_priv(dev);
4306         dma_addr_t mapping;
4307         u32 len, entry, base_flags, mss;
4308
4309         len = skb_headlen(skb);
4310
4311         /* We are running in BH disabled context with netif_tx_lock
4312          * and TX reclaim runs via tp->napi.poll inside of a software
4313          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4314          * no IRQ context deadlocks to worry about either.  Rejoice!
4315          */
4316         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4317                 if (!netif_queue_stopped(dev)) {
4318                         netif_stop_queue(dev);
4319
4320                         /* This is a hard error, log it. */
4321                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4322                                "queue awake!\n", dev->name);
4323                 }
4324                 return NETDEV_TX_BUSY;
4325         }
4326
4327         entry = tp->tx_prod;
4328         base_flags = 0;
4329         mss = 0;
4330         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4331                 int tcp_opt_len, ip_tcp_len;
4332
4333                 if (skb_header_cloned(skb) &&
4334                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4335                         dev_kfree_skb(skb);
4336                         goto out_unlock;
4337                 }
4338
4339                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4340                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4341                 else {
4342                         struct iphdr *iph = ip_hdr(skb);
4343
4344                         tcp_opt_len = tcp_optlen(skb);
4345                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4346
4347                         iph->check = 0;
4348                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4349                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4350                 }
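                /* In both branches the network + transport header
                 * length ends up in the upper bits of the MSS field
                 * (the << 9 above), which is where HW_TSO_2 parts
                 * expect to find the header size for segmentation.
                 */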
4351
4352                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4353                                TXD_FLAG_CPU_POST_DMA);
4354
4355                 tcp_hdr(skb)->check = 0;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4359                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4360 #if TG3_VLAN_TAG_USED
4361         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4362                 base_flags |= (TXD_FLAG_VLAN |
4363                                (vlan_tx_tag_get(skb) << 16));
4364 #endif
4365
4366         /* Queue skb data, a.k.a. the main skb fragment. */
4367         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4368
4369         tp->tx_buffers[entry].skb = skb;
4370         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4371
4372         tg3_set_txd(tp, entry, mapping, len, base_flags,
4373                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4374
4375         entry = NEXT_TX(entry);
4376
4377         /* Now loop through additional data fragments, and queue them. */
4378         if (skb_shinfo(skb)->nr_frags > 0) {
4379                 unsigned int i, last;
4380
4381                 last = skb_shinfo(skb)->nr_frags - 1;
4382                 for (i = 0; i <= last; i++) {
4383                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4384
4385                         len = frag->size;
4386                         mapping = pci_map_page(tp->pdev,
4387                                                frag->page,
4388                                                frag->page_offset,
4389                                                len, PCI_DMA_TODEVICE);
4390
4391                         tp->tx_buffers[entry].skb = NULL;
4392                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4393
4394                         tg3_set_txd(tp, entry, mapping, len,
4395                                     base_flags, (i == last) | (mss << 1));
4396
4397                         entry = NEXT_TX(entry);
4398                 }
4399         }
4400
4401         /* Packets are ready, update Tx producer idx local and on card. */
4402         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4403
4404         tp->tx_prod = entry;
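        /* Stop first, then re-check against the wakeup threshold:
         * the completion path (tg3_tx()) may have freed descriptors
         * in between, and this ordering keeps the queue from
         * stalling permanently.
         */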
4405         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4406                 netif_stop_queue(dev);
4407                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4408                         netif_wake_queue(tp->dev);
4409         }
4410
4411 out_unlock:
4412         mmiowb();
4413
4414         dev->trans_start = jiffies;
4415
4416         return NETDEV_TX_OK;
4417 }
4418
4419 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4420
/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
4424 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4425 {
4426         struct sk_buff *segs, *nskb;
4427
        /* Worst-case estimate of ring usage: budget up to three
         * descriptors for each resulting segment.
         */
4429         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4430                 netif_stop_queue(tp->dev);
4431                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4432                         return NETDEV_TX_BUSY;
4433
4434                 netif_wake_queue(tp->dev);
4435         }
4436
4437         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4438         if (IS_ERR(segs))
4439                 goto tg3_tso_bug_end;
4440
4441         do {
4442                 nskb = segs;
4443                 segs = segs->next;
4444                 nskb->next = NULL;
4445                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4446         } while (segs);
4447
4448 tg3_tso_bug_end:
4449         dev_kfree_skb(skb);
4450
4451         return NETDEV_TX_OK;
4452 }
4453
4454 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4455  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4456  */
4457 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4458 {
4459         struct tg3 *tp = netdev_priv(dev);
4460         dma_addr_t mapping;
4461         u32 len, entry, base_flags, mss;
4462         int would_hit_hwbug;
4463
4464         len = skb_headlen(skb);
4465
4466         /* We are running in BH disabled context with netif_tx_lock
4467          * and TX reclaim runs via tp->napi.poll inside of a software
4468          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4469          * no IRQ context deadlocks to worry about either.  Rejoice!
4470          */
4471         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4472                 if (!netif_queue_stopped(dev)) {
4473                         netif_stop_queue(dev);
4474
4475                         /* This is a hard error, log it. */
4476                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4477                                "queue awake!\n", dev->name);
4478                 }
4479                 return NETDEV_TX_BUSY;
4480         }
4481
4482         entry = tp->tx_prod;
4483         base_flags = 0;
4484         if (skb->ip_summed == CHECKSUM_PARTIAL)
4485                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4486         mss = 0;
4487         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4488                 struct iphdr *iph;
4489                 int tcp_opt_len, ip_tcp_len, hdr_len;
4490
4491                 if (skb_header_cloned(skb) &&
4492                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4493                         dev_kfree_skb(skb);
4494                         goto out_unlock;
4495                 }
4496
4497                 tcp_opt_len = tcp_optlen(skb);
4498                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4499
4500                 hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return tg3_tso_bug(tp, skb);
4504
4505                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4506                                TXD_FLAG_CPU_POST_DMA);
4507
4508                 iph = ip_hdr(skb);
4509                 iph->check = 0;
4510                 iph->tot_len = htons(mss + hdr_len);
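                /* HW TSO engines compute the TCP checksum themselves,
                 * so it is simply cleared below; firmware TSO instead
                 * wants the field pre-seeded with the pseudo-header
                 * checksum.
                 */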
4511                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4512                         tcp_hdr(skb)->check = 0;
4513                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4514                 } else
4515                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4516                                                                  iph->daddr, 0,
4517                                                                  IPPROTO_TCP,
4518                                                                  0);
4519
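                /* The count of extra IP and TCP option words also has
                 * to reach the chip: HW_TSO parts and the 5705 take it
                 * in the MSS field, older firmware-TSO parts in the
                 * descriptor flags.
                 */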
4520                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4521                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4522                         if (tcp_opt_len || iph->ihl > 5) {
4523                                 int tsflags;
4524
4525                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4526                                 mss |= (tsflags << 11);
4527                         }
4528                 } else {
4529                         if (tcp_opt_len || iph->ihl > 5) {
4530                                 int tsflags;
4531
4532                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4533                                 base_flags |= tsflags << 12;
4534                         }
4535                 }
4536         }
4537 #if TG3_VLAN_TAG_USED
4538         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4539                 base_flags |= (TXD_FLAG_VLAN |
4540                                (vlan_tx_tag_get(skb) << 16));
4541 #endif
4542
4543         /* Queue skb data, a.k.a. the main skb fragment. */
4544         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4545
4546         tp->tx_buffers[entry].skb = skb;
4547         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4548
4549         would_hit_hwbug = 0;
4550
4551         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4552                 would_hit_hwbug = 1;
4553         else if (tg3_4g_overflow_test(mapping, len))
4554                 would_hit_hwbug = 1;
4555
4556         tg3_set_txd(tp, entry, mapping, len, base_flags,
4557                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4558
4559         entry = NEXT_TX(entry);
4560
4561         /* Now loop through additional data fragments, and queue them. */
4562         if (skb_shinfo(skb)->nr_frags > 0) {
4563                 unsigned int i, last;
4564
4565                 last = skb_shinfo(skb)->nr_frags - 1;
4566                 for (i = 0; i <= last; i++) {
4567                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4568
4569                         len = frag->size;
4570                         mapping = pci_map_page(tp->pdev,
4571                                                frag->page,
4572                                                frag->page_offset,
4573                                                len, PCI_DMA_TODEVICE);
4574
4575                         tp->tx_buffers[entry].skb = NULL;
4576                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4577
4578                         if (tg3_4g_overflow_test(mapping, len))
4579                                 would_hit_hwbug = 1;
4580
4581                         if (tg3_40bit_overflow_test(tp, mapping, len))
4582                                 would_hit_hwbug = 1;
4583
4584                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4585                                 tg3_set_txd(tp, entry, mapping, len,
4586                                             base_flags, (i == last)|(mss << 1));
4587                         else
4588                                 tg3_set_txd(tp, entry, mapping, len,
4589                                             base_flags, (i == last));
4590
4591                         entry = NEXT_TX(entry);
4592                 }
4593         }
4594
4595         if (would_hit_hwbug) {
4596                 u32 last_plus_one = entry;
4597                 u32 start;
4598
4599                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4600                 start &= (TG3_TX_RING_SIZE - 1);
4601
4602                 /* If the workaround fails due to memory/mapping
4603                  * failure, silently drop this packet.
4604                  */
4605                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4606                                                 &start, base_flags, mss))
4607                         goto out_unlock;
4608
4609                 entry = start;
4610         }
4611
4612         /* Packets are ready, update Tx producer idx local and on card. */
4613         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4614
4615         tp->tx_prod = entry;
4616         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4617                 netif_stop_queue(dev);
4618                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4619                         netif_wake_queue(tp->dev);
4620         }
4621
4622 out_unlock:
4623         mmiowb();
4624
4625         dev->trans_start = jiffies;
4626
4627         return NETDEV_TX_OK;
4628 }
4629
4630 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4631                                int new_mtu)
4632 {
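        /* TSO and jumbo frames cannot be used together on 5780-class
         * chips, so TSO is switched off while a jumbo MTU is in
         * effect and marked capable again once the MTU shrinks.
         */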
4633         dev->mtu = new_mtu;
4634
4635         if (new_mtu > ETH_DATA_LEN) {
4636                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4637                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4638                         ethtool_op_set_tso(dev, 0);
                } else
4641                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4642         } else {
4643                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4644                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4645                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4646         }
4647 }
4648
4649 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4650 {
4651         struct tg3 *tp = netdev_priv(dev);
4652         int err;
4653
4654         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4655                 return -EINVAL;
4656
4657         if (!netif_running(dev)) {
                /* The new MTU will simply take effect the next
                 * time the device is brought up.
                 */
4661                 tg3_set_mtu(dev, tp, new_mtu);
4662                 return 0;
4663         }
4664
4665         tg3_netif_stop(tp);
4666
4667         tg3_full_lock(tp, 1);
4668
4669         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4670
4671         tg3_set_mtu(dev, tp, new_mtu);
4672
4673         err = tg3_restart_hw(tp, 0);
4674
4675         if (!err)
4676                 tg3_netif_start(tp);
4677
4678         tg3_full_unlock(tp);
4679
4680         return err;
4681 }
4682
4683 /* Free up pending packets in all rx/tx rings.
4684  *
4685  * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
4687  * end up in the driver.  tp->{tx,}lock is not held and we are not
4688  * in an interrupt context and thus may sleep.
4689  */
4690 static void tg3_free_rings(struct tg3 *tp)
4691 {
4692         struct ring_info *rxp;
4693         int i;
4694
4695         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4696                 rxp = &tp->rx_std_buffers[i];
4697
4698                 if (rxp->skb == NULL)
4699                         continue;
4700                 pci_unmap_single(tp->pdev,
4701                                  pci_unmap_addr(rxp, mapping),
4702                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4703                                  PCI_DMA_FROMDEVICE);
4704                 dev_kfree_skb_any(rxp->skb);
4705                 rxp->skb = NULL;
4706         }
4707
4708         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4709                 rxp = &tp->rx_jumbo_buffers[i];
4710
4711                 if (rxp->skb == NULL)
4712                         continue;
4713                 pci_unmap_single(tp->pdev,
4714                                  pci_unmap_addr(rxp, mapping),
4715                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4716                                  PCI_DMA_FROMDEVICE);
4717                 dev_kfree_skb_any(rxp->skb);
4718                 rxp->skb = NULL;
4719         }
4720
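        /* The TX loop advances i inside the body: each skb owns one
         * descriptor for its linear data plus one per page fragment.
         */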
4721         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4722                 struct tx_ring_info *txp;
4723                 struct sk_buff *skb;
4724                 int j;
4725
4726                 txp = &tp->tx_buffers[i];
4727                 skb = txp->skb;
4728
4729                 if (skb == NULL) {
4730                         i++;
4731                         continue;
4732                 }
4733
4734                 pci_unmap_single(tp->pdev,
4735                                  pci_unmap_addr(txp, mapping),
4736                                  skb_headlen(skb),
4737                                  PCI_DMA_TODEVICE);
4738                 txp->skb = NULL;
4739
4740                 i++;
4741
4742                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4743                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4744                         pci_unmap_page(tp->pdev,
4745                                        pci_unmap_addr(txp, mapping),
4746                                        skb_shinfo(skb)->frags[j].size,
4747                                        PCI_DMA_TODEVICE);
4748                         i++;
4749                 }
4750
4751                 dev_kfree_skb_any(skb);
4752         }
4753 }
4754
4755 /* Initialize tx/rx rings for packet processing.
4756  *
4757  * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
4759  * end up in the driver.  tp->{tx,}lock are held and thus
4760  * we may not sleep.
4761  */
4762 static int tg3_init_rings(struct tg3 *tp)
4763 {
4764         u32 i;
4765
4766         /* Free up all the SKBs. */
4767         tg3_free_rings(tp);
4768
4769         /* Zero out all descriptors. */
4770         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4771         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4772         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4773         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4774
4775         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4776         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4777             (tp->dev->mtu > ETH_DATA_LEN))
4778                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4779
        /* Initialize invariants of the rings; we only set this
4781          * stuff once.  This works because the card does not
4782          * write into the rx buffer posting rings.
4783          */
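        /* The opaque cookie (ring type + index) set here is echoed
         * back by the chip in the RX return ring; it is how completed
         * buffers are matched to their ring slots.
         */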
4784         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4785                 struct tg3_rx_buffer_desc *rxd;
4786
4787                 rxd = &tp->rx_std[i];
4788                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4789                         << RXD_LEN_SHIFT;
4790                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4791                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4792                                (i << RXD_OPAQUE_INDEX_SHIFT));
4793         }
4794
4795         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4796                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4797                         struct tg3_rx_buffer_desc *rxd;
4798
4799                         rxd = &tp->rx_jumbo[i];
4800                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4801                                 << RXD_LEN_SHIFT;
4802                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4803                                 RXD_FLAG_JUMBO;
4804                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4805                                (i << RXD_OPAQUE_INDEX_SHIFT));
4806                 }
4807         }
4808
4809         /* Now allocate fresh SKBs for each rx ring. */
4810         for (i = 0; i < tp->rx_pending; i++) {
4811                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4812                         printk(KERN_WARNING PFX
4813                                "%s: Using a smaller RX standard ring, "
4814                                "only %d out of %d buffers were allocated "
4815                                "successfully.\n",
4816                                tp->dev->name, i, tp->rx_pending);
4817                         if (i == 0)
4818                                 return -ENOMEM;
4819                         tp->rx_pending = i;
4820                         break;
4821                 }
4822         }
4823
4824         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4825                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4826                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4827                                              -1, i) < 0) {
4828                                 printk(KERN_WARNING PFX
4829                                        "%s: Using a smaller RX jumbo ring, "
4830                                        "only %d out of %d buffers were "
4831                                        "allocated successfully.\n",
4832                                        tp->dev->name, i, tp->rx_jumbo_pending);
4833                                 if (i == 0) {
4834                                         tg3_free_rings(tp);
4835                                         return -ENOMEM;
4836                                 }
4837                                 tp->rx_jumbo_pending = i;
4838                                 break;
4839                         }
4840                 }
4841         }
4842         return 0;
4843 }
4844
/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
4849 static void tg3_free_consistent(struct tg3 *tp)
4850 {
4851         kfree(tp->rx_std_buffers);
4852         tp->rx_std_buffers = NULL;
4853         if (tp->rx_std) {
4854                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4855                                     tp->rx_std, tp->rx_std_mapping);
4856                 tp->rx_std = NULL;
4857         }
4858         if (tp->rx_jumbo) {
4859                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4860                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4861                 tp->rx_jumbo = NULL;
4862         }
4863         if (tp->rx_rcb) {
4864                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4865                                     tp->rx_rcb, tp->rx_rcb_mapping);
4866                 tp->rx_rcb = NULL;
4867         }
4868         if (tp->tx_ring) {
4869                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                    tp->tx_ring, tp->tx_desc_mapping);
4871                 tp->tx_ring = NULL;
4872         }
4873         if (tp->hw_status) {
4874                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4875                                     tp->hw_status, tp->status_mapping);
4876                 tp->hw_status = NULL;
4877         }
4878         if (tp->hw_stats) {
4879                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4880                                     tp->hw_stats, tp->stats_mapping);
4881                 tp->hw_stats = NULL;
4882         }
4883 }
4884
/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
4889 static int tg3_alloc_consistent(struct tg3 *tp)
4890 {
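        /* One allocation backs all three shadow-state arrays:
         * standard RX, jumbo RX, then TX, carved out back to back
         * below.
         */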
4891         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4892                                       (TG3_RX_RING_SIZE +
4893                                        TG3_RX_JUMBO_RING_SIZE)) +
4894                                      (sizeof(struct tx_ring_info) *
4895                                       TG3_TX_RING_SIZE),
4896                                      GFP_KERNEL);
4897         if (!tp->rx_std_buffers)
4898                 return -ENOMEM;
4899
4900         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4901         tp->tx_buffers = (struct tx_ring_info *)
4902                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4903
4904         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4905                                           &tp->rx_std_mapping);
4906         if (!tp->rx_std)
4907                 goto err_out;
4908
4909         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4910                                             &tp->rx_jumbo_mapping);
        if (!tp->rx_jumbo)
4913                 goto err_out;
4914
4915         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4916                                           &tp->rx_rcb_mapping);
4917         if (!tp->rx_rcb)
4918                 goto err_out;
4919
4920         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4921                                            &tp->tx_desc_mapping);
4922         if (!tp->tx_ring)
4923                 goto err_out;
4924
4925         tp->hw_status = pci_alloc_consistent(tp->pdev,
4926                                              TG3_HW_STATUS_SIZE,
4927                                              &tp->status_mapping);
4928         if (!tp->hw_status)
4929                 goto err_out;
4930
4931         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4932                                             sizeof(struct tg3_hw_stats),
4933                                             &tp->stats_mapping);
4934         if (!tp->hw_stats)
4935                 goto err_out;
4936
4937         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4938         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4939
4940         return 0;
4941
4942 err_out:
4943         tg3_free_consistent(tp);
4944         return -ENOMEM;
4945 }
4946
4947 #define MAX_WAIT_CNT 1000
4948
4949 /* To stop a block, clear the enable bit and poll till it
4950  * clears.  tp->lock is held.
4951  */
4952 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4953 {
4954         unsigned int i;
4955         u32 val;
4956
4957         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4958                 switch (ofs) {
4959                 case RCVLSC_MODE:
4960                 case DMAC_MODE:
4961                 case MBFREE_MODE:
4962                 case BUFMGR_MODE:
4963                 case MEMARB_MODE:
                        /* These bits cannot be toggled on the
                         * 5705/5750, so just report success.
                         */
4967                         return 0;
4968
4969                 default:
4970                         break;
                }
4972         }
4973
4974         val = tr32(ofs);
4975         val &= ~enable_bit;
4976         tw32_f(ofs, val);
4977
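        /* Poll for up to MAX_WAIT_CNT * 100us (100ms in total) for
         * the enable bit to clear.
         */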
4978         for (i = 0; i < MAX_WAIT_CNT; i++) {
4979                 udelay(100);
4980                 val = tr32(ofs);
4981                 if ((val & enable_bit) == 0)
4982                         break;
4983         }
4984
4985         if (i == MAX_WAIT_CNT && !silent) {
4986                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4987                        "ofs=%lx enable_bit=%x\n",
4988                        ofs, enable_bit);
4989                 return -ENODEV;
4990         }
4991
4992         return 0;
4993 }
4994
4995 /* tp->lock is held. */
4996 static int tg3_abort_hw(struct tg3 *tp, int silent)
4997 {
4998         int i, err;
4999
5000         tg3_disable_ints(tp);
5001
5002         tp->rx_mode &= ~RX_MODE_ENABLE;
5003         tw32_f(MAC_RX_MODE, tp->rx_mode);
5004         udelay(10);
5005
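        /* Stop the blocks roughly in data-flow order: the RX engines
         * first, then the TX side, then host coalescing and the DMA
         * engines, and finally the buffer manager and memory arbiter.
         */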
5006         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5007         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5008         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5009         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5010         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5011         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5012
5013         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5014         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5015         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5016         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5017         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5018         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5019         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5020
5021         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5022         tw32_f(MAC_MODE, tp->mac_mode);
5023         udelay(40);
5024
5025         tp->tx_mode &= ~TX_MODE_ENABLE;
5026         tw32_f(MAC_TX_MODE, tp->tx_mode);
5027
5028         for (i = 0; i < MAX_WAIT_CNT; i++) {
5029                 udelay(100);
5030                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5031                         break;
5032         }
5033         if (i >= MAX_WAIT_CNT) {
5034                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5035                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5036                        tp->dev->name, tr32(MAC_TX_MODE));
5037                 err |= -ENODEV;
5038         }
5039
5040         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5041         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5042         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5043
5044         tw32(FTQ_RESET, 0xffffffff);
5045         tw32(FTQ_RESET, 0x00000000);
5046
5047         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5048         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5049
5050         if (tp->hw_status)
5051                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5052         if (tp->hw_stats)
5053                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5054
5055         return err;
5056 }
5057
5058 /* tp->lock is held. */
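/* The NVRAM_SWARB request/grant register is a hardware arbiter that
 * serializes NVRAM access between the driver and other on-chip agents
 * (e.g. ASF firmware); nvram_lock_cnt additionally makes the lock
 * recursive for nested driver callers.
 */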
5059 static int tg3_nvram_lock(struct tg3 *tp)
5060 {
5061         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5062                 int i;
5063
5064                 if (tp->nvram_lock_cnt == 0) {
5065                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5066                         for (i = 0; i < 8000; i++) {
5067                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5068                                         break;
5069                                 udelay(20);
5070                         }
5071                         if (i == 8000) {
5072                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5073                                 return -ENODEV;
5074                         }
5075                 }
5076                 tp->nvram_lock_cnt++;
5077         }
5078         return 0;
5079 }
5080
5081 /* tp->lock is held. */
5082 static void tg3_nvram_unlock(struct tg3 *tp)
5083 {
5084         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5085                 if (tp->nvram_lock_cnt > 0)
5086                         tp->nvram_lock_cnt--;
5087                 if (tp->nvram_lock_cnt == 0)
5088                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5089         }
5090 }
5091
5092 /* tp->lock is held. */
5093 static void tg3_enable_nvram_access(struct tg3 *tp)
5094 {
5095         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5096             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5097                 u32 nvaccess = tr32(NVRAM_ACCESS);
5098
5099                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5100         }
5101 }
5102
5103 /* tp->lock is held. */
5104 static void tg3_disable_nvram_access(struct tg3 *tp)
5105 {
5106         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5107             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5108                 u32 nvaccess = tr32(NVRAM_ACCESS);
5109
5110                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5111         }
5112 }
5113
5114 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5115 {
5116         int i;
5117         u32 apedata;
5118
5119         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5120         if (apedata != APE_SEG_SIG_MAGIC)
5121                 return;
5122
5123         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5124         if (apedata != APE_FW_STATUS_READY)
5125                 return;
5126
        /* Wait for up to 1 millisecond for the APE to service the
         * previous event.
         */
5128         for (i = 0; i < 10; i++) {
5129                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5130                         return;
5131
5132                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5133
5134                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5135                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5136                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5137
5138                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5139
5140                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5141                         break;
5142
5143                 udelay(100);
5144         }
5145
5146         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5147                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5148 }
5149
5150 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5151 {
5152         u32 event;
5153         u32 apedata;
5154
5155         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5156                 return;
5157
5158         switch (kind) {
5159                 case RESET_KIND_INIT:
5160                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5161                                         APE_HOST_SEG_SIG_MAGIC);
5162                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5163                                         APE_HOST_SEG_LEN_MAGIC);
5164                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5165                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5166                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5167                                         APE_HOST_DRIVER_ID_MAGIC);
5168                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5169                                         APE_HOST_BEHAV_NO_PHYLOCK);
5170
5171                         event = APE_EVENT_STATUS_STATE_START;
5172                         break;
5173                 case RESET_KIND_SHUTDOWN:
5174                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5175                         break;
5176                 case RESET_KIND_SUSPEND:
5177                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5178                         break;
5179                 default:
5180                         return;
5181         }
5182
5183         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5184
5185         tg3_ape_send_event(tp, event);
5186 }
5187
5188 /* tp->lock is held. */
5189 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5190 {
5191         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5192                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5193
5194         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5195                 switch (kind) {
5196                 case RESET_KIND_INIT:
5197                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5198                                       DRV_STATE_START);
5199                         break;
5200
5201                 case RESET_KIND_SHUTDOWN:
5202                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5203                                       DRV_STATE_UNLOAD);
5204                         break;
5205
5206                 case RESET_KIND_SUSPEND:
5207                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5208                                       DRV_STATE_SUSPEND);
5209                         break;
5210
5211                 default:
5212                         break;
                }
5214         }
5215
5216         if (kind == RESET_KIND_INIT ||
5217             kind == RESET_KIND_SUSPEND)
5218                 tg3_ape_driver_state_change(tp, kind);
5219 }
5220
5221 /* tp->lock is held. */
5222 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5223 {
5224         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5225                 switch (kind) {
5226                 case RESET_KIND_INIT:
5227                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5228                                       DRV_STATE_START_DONE);
5229                         break;
5230
5231                 case RESET_KIND_SHUTDOWN:
5232                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5233                                       DRV_STATE_UNLOAD_DONE);
5234                         break;
5235
5236                 default:
5237                         break;
                }
5239         }
5240
5241         if (kind == RESET_KIND_SHUTDOWN)
5242                 tg3_ape_driver_state_change(tp, kind);
5243 }
5244
5245 /* tp->lock is held. */
5246 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5247 {
5248         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5249                 switch (kind) {
5250                 case RESET_KIND_INIT:
5251                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5252                                       DRV_STATE_START);
5253                         break;
5254
5255                 case RESET_KIND_SHUTDOWN:
5256                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5257                                       DRV_STATE_UNLOAD);
5258                         break;
5259
5260                 case RESET_KIND_SUSPEND:
5261                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5262                                       DRV_STATE_SUSPEND);
5263                         break;
5264
5265                 default:
5266                         break;
                }
5268         }
5269 }
5270
5271 static int tg3_poll_fw(struct tg3 *tp)
5272 {
5273         int i;
5274         u32 val;
5275
5276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5277                 /* Wait up to 20ms for init done. */
5278                 for (i = 0; i < 200; i++) {
5279                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5280                                 return 0;
5281                         udelay(100);
5282                 }
5283                 return -ENODEV;
5284         }
5285
5286         /* Wait for firmware initialization to complete. */
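        /* tg3_write_sig_pre_reset() deposited
         * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in this mailbox before the
         * reset; the boot firmware writes back its one's complement
         * once initialization is done.
         */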
5287         for (i = 0; i < 100000; i++) {
5288                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5289                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5290                         break;
5291                 udelay(10);
5292         }
5293
5294         /* Chip might not be fitted with firmware.  Some Sun onboard
5295          * parts are configured like that.  So don't signal the timeout
5296          * of the above loop as an error, but do report the lack of
5297          * running firmware once.
5298          */
5299         if (i >= 100000 &&
5300             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5301                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5302
5303                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5304                        tp->dev->name);
5305         }
5306
5307         return 0;
5308 }
5309
5310 /* Save PCI command register before chip reset */
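/* Everything else written back by tg3_restore_pci_state() is either
 * cached at probe time or recomputed, so the command register is the
 * only live value that must be preserved across the reset.
 */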
5311 static void tg3_save_pci_state(struct tg3 *tp)
5312 {
5313         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5314 }
5315
5316 /* Restore PCI state after chip reset */
5317 static void tg3_restore_pci_state(struct tg3 *tp)
5318 {
5319         u32 val;
5320
5321         /* Re-enable indirect register accesses. */
5322         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5323                                tp->misc_host_ctrl);
5324
5325         /* Set MAX PCI retry to zero. */
5326         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5327         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5328             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5329                 val |= PCISTATE_RETRY_SAME_DMA;
5330         /* Allow reads and writes to the APE register and memory space. */
5331         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5332                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5333                        PCISTATE_ALLOW_APE_SHMEM_WR;
5334         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5335
5336         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5337
5338         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5339                 pcie_set_readrq(tp->pdev, 4096);
5340         else {
5341                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5342                                       tp->pci_cacheline_sz);
5343                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5344                                       tp->pci_lat_timer);
5345         }
5346
5347         /* Make sure PCI-X relaxed ordering bit is clear. */
5348         if (tp->pcix_cap) {
5349                 u16 pcix_cmd;
5350
5351                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5352                                      &pcix_cmd);
5353                 pcix_cmd &= ~PCI_X_CMD_ERO;
5354                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5355                                       pcix_cmd);
5356         }
5357
5358         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5359
                /* Chip reset on 5780 will clear the MSI enable bit,
                 * so we need to restore it.
                 */
5363                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5364                         u16 ctrl;
5365
5366                         pci_read_config_word(tp->pdev,
5367                                              tp->msi_cap + PCI_MSI_FLAGS,
5368                                              &ctrl);
5369                         pci_write_config_word(tp->pdev,
5370                                               tp->msi_cap + PCI_MSI_FLAGS,
5371                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5372                         val = tr32(MSGINT_MODE);
5373                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5374                 }
5375         }
5376 }
5377
5378 static void tg3_stop_fw(struct tg3 *);
5379
5380 /* tp->lock is held. */
5381 static int tg3_chip_reset(struct tg3 *tp)
5382 {
5383         u32 val;
5384         void (*write_op)(struct tg3 *, u32, u32);
5385         int err;
5386
5387         tg3_nvram_lock(tp);
5388
5389         /* No matching tg3_nvram_unlock() after this because
5390          * chip reset below will undo the nvram lock.
5391          */
5392         tp->nvram_lock_cnt = 0;
5393
5394         /* GRC_MISC_CFG core clock reset will clear the memory
5395          * enable bit in PCI register 4 and the MSI enable bit
5396          * on some chips, so we save relevant registers here.
5397          */
5398         tg3_save_pci_state(tp);
5399
5400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5401             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5405                 tw32(GRC_FASTBOOT_PC, 0);
5406
5407         /*
5408          * We must avoid the readl() that normally takes place.
5409          * It locks machines, causes machine checks, and other
5410          * fun things.  So, temporarily disable the 5701
5411          * hardware workaround, while we do the reset.
5412          */
5413         write_op = tp->write32;
5414         if (write_op == tg3_write_flush_reg32)
5415                 tp->write32 = tg3_write32;
5416
5417         /* Prevent the irq handler from reading or writing PCI registers
5418          * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupts
5420          * at this time, but the irq handler may still be called due to irq
5421          * sharing or irqpoll.
5422          */
5423         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5424         if (tp->hw_status) {
5425                 tp->hw_status->status = 0;
5426                 tp->hw_status->status_tag = 0;
5427         }
5428         tp->last_tag = 0;
5429         smp_mb();
5430         synchronize_irq(tp->pdev->irq);
5431
5432         /* do the reset */
5433         val = GRC_MISC_CFG_CORECLK_RESET;
5434
5435         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5436                 if (tr32(0x7e2c) == 0x60) {
5437                         tw32(0x7e2c, 0x20);
5438                 }
5439                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5440                         tw32(GRC_MISC_CFG, (1 << 29));
5441                         val |= (1 << 29);
5442                 }
5443         }
5444
5445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5446                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5447                 tw32(GRC_VCPU_EXT_CTRL,
5448                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5449         }
5450
5451         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5452                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5453         tw32(GRC_MISC_CFG, val);
5454
5455         /* restore 5701 hardware bug workaround write method */
5456         tp->write32 = write_op;
5457
5458         /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips will not even respond to a PCI cfg access
5460          * when the reset command is given to the chip.
5461          *
5462          * How do these hardware designers expect things to work
5463          * properly if the PCI write is posted for a long period
5464          * of time?  It is always necessary to have some method by
5465          * which a register read back can occur to push the write
5466          * out which does the reset.
5467          *
5468          * For most tg3 variants the trick below was working.
5469          * Ho hum...
5470          */
5471         udelay(120);
5472
5473         /* Flush PCI posted writes.  The normal MMIO registers
5474          * are inaccessible at this time so this is the only
         * way to do this reliably (actually, this is no longer
5476          * the case, see above).  I tried to use indirect
5477          * register read/write but this upset some 5701 variants.
5478          */
5479         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5480
5481         udelay(120);
5482
5483         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5484                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5485                         int i;
5486                         u32 cfg_val;
5487
5488                         /* Wait for link training to complete.  */
5489                         for (i = 0; i < 5000; i++)
5490                                 udelay(100);
5491
5492                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5493                         pci_write_config_dword(tp->pdev, 0xc4,
5494                                                cfg_val | (1 << 15));
5495                 }
5496                 /* Set PCIE max payload size and clear error status.  */
5497                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5498         }
5499
5500         tg3_restore_pci_state(tp);
5501
5502         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5503
5504         val = 0;
5505         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5506                 val = tr32(MEMARB_MODE);
5507         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5508
5509         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5510                 tg3_stop_fw(tp);
5511                 tw32(0x5000, 0x400);
5512         }
5513
5514         tw32(GRC_MODE, tp->grc_mode);
5515
5516         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5517                 val = tr32(0xc4);
5518
5519                 tw32(0xc4, val | (1 << 15));
5520         }
5521
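        /* Mini-PCI 5705 boards need the CLKRUN output re-enabled after
         * the reset, and A0 silicon needs CLKRUN forced on as well.
         */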
5522         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5524                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5525                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5526                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5527                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5528         }
5529
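        /* Restore a sane MAC port mode: TBI for SerDes parts, GMII for
         * MII-connected SerDes, otherwise leave the MAC mode cleared
         * until the link is renegotiated.
         */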
5530         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5531                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5532                 tw32_f(MAC_MODE, tp->mac_mode);
5533         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5534                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5535                 tw32_f(MAC_MODE, tp->mac_mode);
5536         } else
5537                 tw32_f(MAC_MODE, 0);
5538         udelay(40);
5539
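        /* Wait for the on-chip bootcode to signal that it has finished
         * reinitializing.
         */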
5540         err = tg3_poll_fw(tp);
5541         if (err)
5542                 return err;
5543
5544         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5545             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5546                 val = tr32(0x7c00);
5547
5548                 tw32(0x7c00, val | (1 << 25));
5549         }
5550
5551         /* Reprobe ASF enable state.  */
5552         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5553         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5554         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5555         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5556                 u32 nic_cfg;
5557
5558                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5559                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5560                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5561                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5562                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5563                 }
5564         }
5565
5566         return 0;
5567 }
5568
5569 /* tp->lock is held. */
5570 static void tg3_stop_fw(struct tg3 *tp)
5571 {
5572         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5573            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5574                 u32 val;
5575
5576                 /* Wait for RX CPU to ACK the previous event. */
5577                 tg3_wait_for_event_ack(tp);
5578
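                /* Post the PAUSE_FW command in the mailbox and ring
                 * the RX CPU driver-event doorbell.
                 */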
5579                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5580                 val = tr32(GRC_RX_CPU_EVENT);
5581                 val |= GRC_RX_CPU_DRIVER_EVENT;
5582                 tw32(GRC_RX_CPU_EVENT, val);
5583
5584                 /* Wait for RX CPU to ACK this event. */
5585                 tg3_wait_for_event_ack(tp);
5586         }
5587 }
5588
5589 /* tp->lock is held. */
5590 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5591 {
5592         int err;
5593
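        /* Take the chip down in a firmware-safe order: pause the
         * firmware, post the pre-reset signature, quiesce the
         * hardware, reset the core, then post the legacy and
         * post-reset signatures.
         */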
5594         tg3_stop_fw(tp);
5595
5596         tg3_write_sig_pre_reset(tp, kind);
5597
5598         tg3_abort_hw(tp, silent);
5599         err = tg3_chip_reset(tp);
5600
5601         tg3_write_sig_legacy(tp, kind);
5602         tg3_write_sig_post_reset(tp, kind);
5603
5604         if (err)
5605                 return err;
5606
5607         return 0;
5608 }
5609
5610 #define TG3_FW_RELEASE_MAJOR    0x0
5611 #define TG3_FW_RELEASE_MINOR    0x0
5612 #define TG3_FW_RELEASE_FIX      0x0
5613 #define TG3_FW_START_ADDR       0x08000000
5614 #define TG3_FW_TEXT_ADDR        0x08000000
5615 #define TG3_FW_TEXT_LEN         0x9c0
5616 #define TG3_FW_RODATA_ADDR      0x080009c0
5617 #define TG3_FW_RODATA_LEN       0x60
5618 #define TG3_FW_DATA_ADDR        0x08000a40
5619 #define TG3_FW_DATA_LEN         0x20
5620 #define TG3_FW_SBSS_ADDR        0x08000a60
5621 #define TG3_FW_SBSS_LEN         0xc
5622 #define TG3_FW_BSS_ADDR         0x08000a70
5623 #define TG3_FW_BSS_LEN          0x10
5624
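/* tg3FwText below is the instruction image for the chip's on-board
 * RX CPU (it appears to be MIPS machine code); the TG3_FW_* constants
 * above describe the image's layout in that CPU's address space.
 */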
5625 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5626         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5627         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5628         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5629         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5630         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5631         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5632         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5633         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5634         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5635         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5636         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5637         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5638         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5639         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5640         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5641         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5642         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5643         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5644         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5645         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5646         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5647         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5648         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5649         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5650         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5651         0, 0, 0, 0, 0, 0,
5652         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5653         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5654         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5655         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5656         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5657         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5658         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5659         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5660         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5661         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5662         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5663         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5664         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5665         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5666         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5667         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5668         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5669         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5670         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5671         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5672         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5673         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5674         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5675         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5676         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5677         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5678         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5679         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5680         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5681         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5682         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5683         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5684         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5685         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5686         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5687         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5688         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5689         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5690         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5691         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5692         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5693         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5694         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5695         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5696         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5697         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5698         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5699         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5700         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5701         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5702         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5703         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5704         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5705         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5706         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5707         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5708         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5709         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5710         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5711         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5712         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5713         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5714         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5715         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5716         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5717 };
5718
5719 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5720         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5721         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5722         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5723         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5724         0x00000000
5725 };
5726
5727 #if 0 /* All zeros, don't eat up space with it. */
5728 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5729         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5730         0x00000000, 0x00000000, 0x00000000, 0x00000000
5731 };
5732 #endif
5733
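/* Scratch memory window of the on-chip RX CPU, used when firmware
 * images are downloaded to it.
 */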
5734 #define RX_CPU_SCRATCH_BASE     0x30000
5735 #define RX_CPU_SCRATCH_SIZE     0x04000
5736