Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[linux-3.10.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.34"
70 #define DRV_MODULE_RELDATE      "July 25, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { 0, }
245 };
246
247 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
248
249 static struct {
250         const char string[ETH_GSTRING_LEN];
251 } ethtool_stats_keys[TG3_NUM_STATS] = {
252         { "rx_octets" },
253         { "rx_fragments" },
254         { "rx_ucast_packets" },
255         { "rx_mcast_packets" },
256         { "rx_bcast_packets" },
257         { "rx_fcs_errors" },
258         { "rx_align_errors" },
259         { "rx_xon_pause_rcvd" },
260         { "rx_xoff_pause_rcvd" },
261         { "rx_mac_ctrl_rcvd" },
262         { "rx_xoff_entered" },
263         { "rx_frame_too_long_errors" },
264         { "rx_jabbers" },
265         { "rx_undersize_packets" },
266         { "rx_in_length_errors" },
267         { "rx_out_length_errors" },
268         { "rx_64_or_less_octet_packets" },
269         { "rx_65_to_127_octet_packets" },
270         { "rx_128_to_255_octet_packets" },
271         { "rx_256_to_511_octet_packets" },
272         { "rx_512_to_1023_octet_packets" },
273         { "rx_1024_to_1522_octet_packets" },
274         { "rx_1523_to_2047_octet_packets" },
275         { "rx_2048_to_4095_octet_packets" },
276         { "rx_4096_to_8191_octet_packets" },
277         { "rx_8192_to_9022_octet_packets" },
278
279         { "tx_octets" },
280         { "tx_collisions" },
281
282         { "tx_xon_sent" },
283         { "tx_xoff_sent" },
284         { "tx_flow_control" },
285         { "tx_mac_errors" },
286         { "tx_single_collisions" },
287         { "tx_mult_collisions" },
288         { "tx_deferred" },
289         { "tx_excessive_collisions" },
290         { "tx_late_collisions" },
291         { "tx_collide_2times" },
292         { "tx_collide_3times" },
293         { "tx_collide_4times" },
294         { "tx_collide_5times" },
295         { "tx_collide_6times" },
296         { "tx_collide_7times" },
297         { "tx_collide_8times" },
298         { "tx_collide_9times" },
299         { "tx_collide_10times" },
300         { "tx_collide_11times" },
301         { "tx_collide_12times" },
302         { "tx_collide_13times" },
303         { "tx_collide_14times" },
304         { "tx_collide_15times" },
305         { "tx_ucast_packets" },
306         { "tx_mcast_packets" },
307         { "tx_bcast_packets" },
308         { "tx_carrier_sense_errors" },
309         { "tx_discards" },
310         { "tx_errors" },
311
312         { "dma_writeq_full" },
313         { "dma_write_prioq_full" },
314         { "rxbds_empty" },
315         { "rx_discards" },
316         { "rx_errors" },
317         { "rx_threshold_hit" },
318
319         { "dma_readq_full" },
320         { "dma_read_prioq_full" },
321         { "tx_comp_queue_full" },
322
323         { "ring_set_send_prod_index" },
324         { "ring_status_update" },
325         { "nic_irqs" },
326         { "nic_avoided_irqs" },
327         { "nic_tx_threshold_hit" }
328 };
329
330 static struct {
331         const char string[ETH_GSTRING_LEN];
332 } ethtool_test_keys[TG3_NUM_TEST] = {
333         { "nvram test     (online) " },
334         { "link test      (online) " },
335         { "register test  (offline)" },
336         { "memory test    (offline)" },
337         { "loopback test  (offline)" },
338         { "interrupt test (offline)" },
339 };
340
341 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 {
343         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
344                 spin_lock_bh(&tp->indirect_lock);
345                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
346                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
347                 spin_unlock_bh(&tp->indirect_lock);
348         } else {
349                 writel(val, tp->regs + off);
350                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
351                         readl(tp->regs + off);
352         }
353 }
354
355 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
356 {
357         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
358                 spin_lock_bh(&tp->indirect_lock);
359                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
360                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
361                 spin_unlock_bh(&tp->indirect_lock);
362         } else {
363                 void __iomem *dest = tp->regs + off;
364                 writel(val, dest);
365                 readl(dest);    /* always flush PCI write */
366         }
367 }
368
369 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
370 {
371         void __iomem *mbox = tp->regs + off;
372         writel(val, mbox);
373         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
374                 readl(mbox);
375 }
376
377 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
378 {
379         void __iomem *mbox = tp->regs + off;
380         writel(val, mbox);
381         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
382                 writel(val, mbox);
383         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
384                 readl(mbox);
385 }
386
387 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
388 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
389 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
390
391 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
392 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
393 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
394 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
395 #define tr32(reg)               readl(tp->regs + (reg))
396 #define tr16(reg)               readw(tp->regs + (reg))
397 #define tr8(reg)                readb(tp->regs + (reg))
398
399 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
400 {
401         spin_lock_bh(&tp->indirect_lock);
402         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
403         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
404
405         /* Always leave this as zero. */
406         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
407         spin_unlock_bh(&tp->indirect_lock);
408 }
409
410 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
411 {
412         spin_lock_bh(&tp->indirect_lock);
413         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
414         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
415
416         /* Always leave this as zero. */
417         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
418         spin_unlock_bh(&tp->indirect_lock);
419 }
420
421 static void tg3_disable_ints(struct tg3 *tp)
422 {
423         tw32(TG3PCI_MISC_HOST_CTRL,
424              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
425         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
426         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
427 }
428
429 static inline void tg3_cond_int(struct tg3 *tp)
430 {
431         if (tp->hw_status->status & SD_STATUS_UPDATED)
432                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
433 }
434
435 static void tg3_enable_ints(struct tg3 *tp)
436 {
437         tp->irq_sync = 0;
438         wmb();
439
440         tw32(TG3PCI_MISC_HOST_CTRL,
441              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
442         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
443                      (tp->last_tag << 24));
444         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
445         tg3_cond_int(tp);
446 }
447
448 static inline unsigned int tg3_has_work(struct tg3 *tp)
449 {
450         struct tg3_hw_status *sblk = tp->hw_status;
451         unsigned int work_exists = 0;
452
453         /* check for phy events */
454         if (!(tp->tg3_flags &
455               (TG3_FLAG_USE_LINKCHG_REG |
456                TG3_FLAG_POLL_SERDES))) {
457                 if (sblk->status & SD_STATUS_LINK_CHG)
458                         work_exists = 1;
459         }
460         /* check for RX/TX work to do */
461         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
462             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
463                 work_exists = 1;
464
465         return work_exists;
466 }
467
468 /* tg3_restart_ints
469  *  similar to tg3_enable_ints, but it accurately determines whether there
470  *  is new work pending and can return without flushing the PIO write
471  *  which reenables interrupts 
472  */
473 static void tg3_restart_ints(struct tg3 *tp)
474 {
475         tw32(TG3PCI_MISC_HOST_CTRL,
476                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
477         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
478                      tp->last_tag << 24);
479         mmiowb();
480
481         /* When doing tagged status, this work check is unnecessary.
482          * The last_tag we write above tells the chip which piece of
483          * work we've completed.
484          */
485         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
486             tg3_has_work(tp))
487                 tw32(HOSTCC_MODE, tp->coalesce_mode |
488                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
489 }
490
491 static inline void tg3_netif_stop(struct tg3 *tp)
492 {
493         tp->dev->trans_start = jiffies; /* prevent tx timeout */
494         netif_poll_disable(tp->dev);
495         netif_tx_disable(tp->dev);
496 }
497
498 static inline void tg3_netif_start(struct tg3 *tp)
499 {
500         netif_wake_queue(tp->dev);
501         /* NOTE: unconditional netif_wake_queue is only appropriate
502          * so long as all callers are assured to have free tx slots
503          * (such as after tg3_init_hw)
504          */
505         netif_poll_enable(tp->dev);
506         tp->hw_status->status |= SD_STATUS_UPDATED;
507         tg3_enable_ints(tp);
508 }
509
510 static void tg3_switch_clocks(struct tg3 *tp)
511 {
512         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
513         u32 orig_clock_ctrl;
514
515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
516                 return;
517
518         orig_clock_ctrl = clock_ctrl;
519         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
520                        CLOCK_CTRL_CLKRUN_OENABLE |
521                        0x1f);
522         tp->pci_clock_ctrl = clock_ctrl;
523
524         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
525                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
526                         tw32_f(TG3PCI_CLOCK_CTRL,
527                                clock_ctrl | CLOCK_CTRL_625_CORE);
528                         udelay(40);
529                 }
530         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
531                 tw32_f(TG3PCI_CLOCK_CTRL,
532                      clock_ctrl |
533                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
534                 udelay(40);
535                 tw32_f(TG3PCI_CLOCK_CTRL,
536                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
537                 udelay(40);
538         }
539         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
540         udelay(40);
541 }
542
543 #define PHY_BUSY_LOOPS  5000
544
545 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
546 {
547         u32 frame_val;
548         unsigned int loops;
549         int ret;
550
551         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
552                 tw32_f(MAC_MI_MODE,
553                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
554                 udelay(80);
555         }
556
557         *val = 0x0;
558
559         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
560                       MI_COM_PHY_ADDR_MASK);
561         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
562                       MI_COM_REG_ADDR_MASK);
563         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
564         
565         tw32_f(MAC_MI_COM, frame_val);
566
567         loops = PHY_BUSY_LOOPS;
568         while (loops != 0) {
569                 udelay(10);
570                 frame_val = tr32(MAC_MI_COM);
571
572                 if ((frame_val & MI_COM_BUSY) == 0) {
573                         udelay(5);
574                         frame_val = tr32(MAC_MI_COM);
575                         break;
576                 }
577                 loops -= 1;
578         }
579
580         ret = -EBUSY;
581         if (loops != 0) {
582                 *val = frame_val & MI_COM_DATA_MASK;
583                 ret = 0;
584         }
585
586         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
587                 tw32_f(MAC_MI_MODE, tp->mi_mode);
588                 udelay(80);
589         }
590
591         return ret;
592 }
593
594 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
595 {
596         u32 frame_val;
597         unsigned int loops;
598         int ret;
599
600         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
601                 tw32_f(MAC_MI_MODE,
602                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
603                 udelay(80);
604         }
605
606         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
607                       MI_COM_PHY_ADDR_MASK);
608         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
609                       MI_COM_REG_ADDR_MASK);
610         frame_val |= (val & MI_COM_DATA_MASK);
611         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
612         
613         tw32_f(MAC_MI_COM, frame_val);
614
615         loops = PHY_BUSY_LOOPS;
616         while (loops != 0) {
617                 udelay(10);
618                 frame_val = tr32(MAC_MI_COM);
619                 if ((frame_val & MI_COM_BUSY) == 0) {
620                         udelay(5);
621                         frame_val = tr32(MAC_MI_COM);
622                         break;
623                 }
624                 loops -= 1;
625         }
626
627         ret = -EBUSY;
628         if (loops != 0)
629                 ret = 0;
630
631         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
632                 tw32_f(MAC_MI_MODE, tp->mi_mode);
633                 udelay(80);
634         }
635
636         return ret;
637 }
638
639 static void tg3_phy_set_wirespeed(struct tg3 *tp)
640 {
641         u32 val;
642
643         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
644                 return;
645
646         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
647             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
648                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
649                              (val | (1 << 15) | (1 << 4)));
650 }
651
652 static int tg3_bmcr_reset(struct tg3 *tp)
653 {
654         u32 phy_control;
655         int limit, err;
656
657         /* OK, reset it, and poll the BMCR_RESET bit until it
658          * clears or we time out.
659          */
660         phy_control = BMCR_RESET;
661         err = tg3_writephy(tp, MII_BMCR, phy_control);
662         if (err != 0)
663                 return -EBUSY;
664
665         limit = 5000;
666         while (limit--) {
667                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
668                 if (err != 0)
669                         return -EBUSY;
670
671                 if ((phy_control & BMCR_RESET) == 0) {
672                         udelay(40);
673                         break;
674                 }
675                 udelay(10);
676         }
677         if (limit <= 0)
678                 return -EBUSY;
679
680         return 0;
681 }
682
683 static int tg3_wait_macro_done(struct tg3 *tp)
684 {
685         int limit = 100;
686
687         while (limit--) {
688                 u32 tmp32;
689
690                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
691                         if ((tmp32 & 0x1000) == 0)
692                                 break;
693                 }
694         }
695         if (limit <= 0)
696                 return -EBUSY;
697
698         return 0;
699 }
700
701 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
702 {
703         static const u32 test_pat[4][6] = {
704         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
705         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
706         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
707         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
708         };
709         int chan;
710
711         for (chan = 0; chan < 4; chan++) {
712                 int i;
713
714                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
715                              (chan * 0x2000) | 0x0200);
716                 tg3_writephy(tp, 0x16, 0x0002);
717
718                 for (i = 0; i < 6; i++)
719                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
720                                      test_pat[chan][i]);
721
722                 tg3_writephy(tp, 0x16, 0x0202);
723                 if (tg3_wait_macro_done(tp)) {
724                         *resetp = 1;
725                         return -EBUSY;
726                 }
727
728                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
729                              (chan * 0x2000) | 0x0200);
730                 tg3_writephy(tp, 0x16, 0x0082);
731                 if (tg3_wait_macro_done(tp)) {
732                         *resetp = 1;
733                         return -EBUSY;
734                 }
735
736                 tg3_writephy(tp, 0x16, 0x0802);
737                 if (tg3_wait_macro_done(tp)) {
738                         *resetp = 1;
739                         return -EBUSY;
740                 }
741
742                 for (i = 0; i < 6; i += 2) {
743                         u32 low, high;
744
745                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
746                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
747                             tg3_wait_macro_done(tp)) {
748                                 *resetp = 1;
749                                 return -EBUSY;
750                         }
751                         low &= 0x7fff;
752                         high &= 0x000f;
753                         if (low != test_pat[chan][i] ||
754                             high != test_pat[chan][i+1]) {
755                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
756                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
757                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
758
759                                 return -EBUSY;
760                         }
761                 }
762         }
763
764         return 0;
765 }
766
767 static int tg3_phy_reset_chanpat(struct tg3 *tp)
768 {
769         int chan;
770
771         for (chan = 0; chan < 4; chan++) {
772                 int i;
773
774                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
775                              (chan * 0x2000) | 0x0200);
776                 tg3_writephy(tp, 0x16, 0x0002);
777                 for (i = 0; i < 6; i++)
778                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
779                 tg3_writephy(tp, 0x16, 0x0202);
780                 if (tg3_wait_macro_done(tp))
781                         return -EBUSY;
782         }
783
784         return 0;
785 }
786
787 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
788 {
789         u32 reg32, phy9_orig;
790         int retries, do_phy_reset, err;
791
792         retries = 10;
793         do_phy_reset = 1;
794         do {
795                 if (do_phy_reset) {
796                         err = tg3_bmcr_reset(tp);
797                         if (err)
798                                 return err;
799                         do_phy_reset = 0;
800                 }
801
802                 /* Disable transmitter and interrupt.  */
803                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
804                         continue;
805
806                 reg32 |= 0x3000;
807                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
808
809                 /* Set full-duplex, 1000 mbps.  */
810                 tg3_writephy(tp, MII_BMCR,
811                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
812
813                 /* Set to master mode.  */
814                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
815                         continue;
816
817                 tg3_writephy(tp, MII_TG3_CTRL,
818                              (MII_TG3_CTRL_AS_MASTER |
819                               MII_TG3_CTRL_ENABLE_AS_MASTER));
820
821                 /* Enable SM_DSP_CLOCK and 6dB.  */
822                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
823
824                 /* Block the PHY control access.  */
825                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
826                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
827
828                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
829                 if (!err)
830                         break;
831         } while (--retries);
832
833         err = tg3_phy_reset_chanpat(tp);
834         if (err)
835                 return err;
836
837         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
838         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
839
840         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
841         tg3_writephy(tp, 0x16, 0x0000);
842
843         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
844             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
845                 /* Set Extended packet length bit for jumbo frames */
846                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
847         }
848         else {
849                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
850         }
851
852         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
853
854         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
855                 reg32 &= ~0x3000;
856                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
857         } else if (!err)
858                 err = -EBUSY;
859
860         return err;
861 }
862
863 /* This will reset the tigon3 PHY if there is no valid
864  * link unless the FORCE argument is non-zero.
865  */
866 static int tg3_phy_reset(struct tg3 *tp)
867 {
868         u32 phy_status;
869         int err;
870
871         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
872         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
873         if (err != 0)
874                 return -EBUSY;
875
876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
879                 err = tg3_phy_reset_5703_4_5(tp);
880                 if (err)
881                         return err;
882                 goto out;
883         }
884
885         err = tg3_bmcr_reset(tp);
886         if (err)
887                 return err;
888
889 out:
890         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
891                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
892                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
893                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
894                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
895                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
896                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
897         }
898         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
899                 tg3_writephy(tp, 0x1c, 0x8d68);
900                 tg3_writephy(tp, 0x1c, 0x8d68);
901         }
902         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
904                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
905                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
906                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
907                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
908                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
909                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
910                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
911         }
912         /* Set Extended packet length bit (bit 14) on all chips that */
913         /* support jumbo frames */
914         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
915                 /* Cannot do read-modify-write on 5401 */
916                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
917         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
918                 u32 phy_reg;
919
920                 /* Set bit 14 with read-modify-write to preserve other bits */
921                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
922                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
923                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
924         }
925
926         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
927          * jumbo frames transmission.
928          */
929         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
930                 u32 phy_reg;
931
932                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
933                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
934                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
935         }
936
937         tg3_phy_set_wirespeed(tp);
938         return 0;
939 }
940
941 static void tg3_frob_aux_power(struct tg3 *tp)
942 {
943         struct tg3 *tp_peer = tp;
944
945         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
946                 return;
947
948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
949                 tp_peer = pci_get_drvdata(tp->pdev_peer);
950                 if (!tp_peer)
951                         BUG();
952         }
953
954
955         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
956             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
957                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
958                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
959                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960                              (GRC_LCLCTRL_GPIO_OE0 |
961                               GRC_LCLCTRL_GPIO_OE1 |
962                               GRC_LCLCTRL_GPIO_OE2 |
963                               GRC_LCLCTRL_GPIO_OUTPUT0 |
964                               GRC_LCLCTRL_GPIO_OUTPUT1));
965                         udelay(100);
966                 } else {
967                         u32 no_gpio2;
968                         u32 grc_local_ctrl;
969
970                         if (tp_peer != tp &&
971                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
972                                 return;
973
974                         /* On 5753 and variants, GPIO2 cannot be used. */
975                         no_gpio2 = tp->nic_sram_data_cfg &
976                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
977
978                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
979                                          GRC_LCLCTRL_GPIO_OE1 |
980                                          GRC_LCLCTRL_GPIO_OE2 |
981                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
982                                          GRC_LCLCTRL_GPIO_OUTPUT2;
983                         if (no_gpio2) {
984                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
985                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
986                         }
987                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
988                                                 grc_local_ctrl);
989                         udelay(100);
990
991                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
992
993                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
994                                                 grc_local_ctrl);
995                         udelay(100);
996
997                         if (!no_gpio2) {
998                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
999                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1000                                        grc_local_ctrl);
1001                                 udelay(100);
1002                         }
1003                 }
1004         } else {
1005                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1006                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1007                         if (tp_peer != tp &&
1008                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1009                                 return;
1010
1011                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1012                              (GRC_LCLCTRL_GPIO_OE1 |
1013                               GRC_LCLCTRL_GPIO_OUTPUT1));
1014                         udelay(100);
1015
1016                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1017                              (GRC_LCLCTRL_GPIO_OE1));
1018                         udelay(100);
1019
1020                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1021                              (GRC_LCLCTRL_GPIO_OE1 |
1022                               GRC_LCLCTRL_GPIO_OUTPUT1));
1023                         udelay(100);
1024                 }
1025         }
1026 }
1027
1028 static int tg3_setup_phy(struct tg3 *, int);
1029
1030 #define RESET_KIND_SHUTDOWN     0
1031 #define RESET_KIND_INIT         1
1032 #define RESET_KIND_SUSPEND      2
1033
1034 static void tg3_write_sig_post_reset(struct tg3 *, int);
1035 static int tg3_halt_cpu(struct tg3 *, u32);
1036
1037 static int tg3_set_power_state(struct tg3 *tp, int state)
1038 {
1039         u32 misc_host_ctrl;
1040         u16 power_control, power_caps;
1041         int pm = tp->pm_cap;
1042
1043         /* Make sure register accesses (indirect or otherwise)
1044          * will function correctly.
1045          */
1046         pci_write_config_dword(tp->pdev,
1047                                TG3PCI_MISC_HOST_CTRL,
1048                                tp->misc_host_ctrl);
1049
1050         pci_read_config_word(tp->pdev,
1051                              pm + PCI_PM_CTRL,
1052                              &power_control);
1053         power_control |= PCI_PM_CTRL_PME_STATUS;
1054         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1055         switch (state) {
1056         case 0:
1057                 power_control |= 0;
1058                 pci_write_config_word(tp->pdev,
1059                                       pm + PCI_PM_CTRL,
1060                                       power_control);
1061                 udelay(100);    /* Delay after power state change */
1062
1063                 /* Switch out of Vaux if it is not a LOM */
1064                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
1065                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1066                         udelay(100);
1067                 }
1068
1069                 return 0;
1070
1071         case 1:
1072                 power_control |= 1;
1073                 break;
1074
1075         case 2:
1076                 power_control |= 2;
1077                 break;
1078
1079         case 3:
1080                 power_control |= 3;
1081                 break;
1082
1083         default:
1084                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1085                        "requested.\n",
1086                        tp->dev->name, state);
1087                 return -EINVAL;
1088         };
1089
1090         power_control |= PCI_PM_CTRL_PME_ENABLE;
1091
1092         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1093         tw32(TG3PCI_MISC_HOST_CTRL,
1094              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1095
1096         if (tp->link_config.phy_is_low_power == 0) {
1097                 tp->link_config.phy_is_low_power = 1;
1098                 tp->link_config.orig_speed = tp->link_config.speed;
1099                 tp->link_config.orig_duplex = tp->link_config.duplex;
1100                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1101         }
1102
1103         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1104                 tp->link_config.speed = SPEED_10;
1105                 tp->link_config.duplex = DUPLEX_HALF;
1106                 tp->link_config.autoneg = AUTONEG_ENABLE;
1107                 tg3_setup_phy(tp, 0);
1108         }
1109
1110         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1111
1112         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1113                 u32 mac_mode;
1114
1115                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1116                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1117                         udelay(40);
1118
1119                         mac_mode = MAC_MODE_PORT_MODE_MII;
1120
1121                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1122                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1123                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1124                 } else {
1125                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1126                 }
1127
1128                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1129                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1130
1131                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1132                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1133                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1134
1135                 tw32_f(MAC_MODE, mac_mode);
1136                 udelay(100);
1137
1138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1139                 udelay(10);
1140         }
1141
1142         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1143             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1144              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1145                 u32 base_val;
1146
1147                 base_val = tp->pci_clock_ctrl;
1148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1149                              CLOCK_CTRL_TXCLK_DISABLE);
1150
1151                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1152                      CLOCK_CTRL_ALTCLK |
1153                      CLOCK_CTRL_PWRDOWN_PLL133);
1154                 udelay(40);
1155         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
1156                 /* do nothing */
1157         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1158                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1159                 u32 newbits1, newbits2;
1160
1161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1163                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1164                                     CLOCK_CTRL_TXCLK_DISABLE |
1165                                     CLOCK_CTRL_ALTCLK);
1166                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1167                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1168                         newbits1 = CLOCK_CTRL_625_CORE;
1169                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1170                 } else {
1171                         newbits1 = CLOCK_CTRL_ALTCLK;
1172                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1173                 }
1174
1175                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1176                 udelay(40);
1177
1178                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1179                 udelay(40);
1180
1181                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1182                         u32 newbits3;
1183
1184                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1185                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1186                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1187                                             CLOCK_CTRL_TXCLK_DISABLE |
1188                                             CLOCK_CTRL_44MHZ_CORE);
1189                         } else {
1190                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1191                         }
1192
1193                         tw32_f(TG3PCI_CLOCK_CTRL,
1194                                          tp->pci_clock_ctrl | newbits3);
1195                         udelay(40);
1196                 }
1197         }
1198
1199         tg3_frob_aux_power(tp);
1200
1201         /* Workaround for unstable PLL clock */
1202         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1203             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1204                 u32 val = tr32(0x7d00);
1205
1206                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1207                 tw32(0x7d00, val);
1208                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1209                         tg3_halt_cpu(tp, RX_CPU_BASE);
1210         }
1211
1212         /* Finally, set the new power state. */
1213         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1214         udelay(100);    /* Delay after power state change */
1215
1216         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1217
1218         return 0;
1219 }
1220
1221 static void tg3_link_report(struct tg3 *tp)
1222 {
1223         if (!netif_carrier_ok(tp->dev)) {
1224                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1225         } else {
1226                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1227                        tp->dev->name,
1228                        (tp->link_config.active_speed == SPEED_1000 ?
1229                         1000 :
1230                         (tp->link_config.active_speed == SPEED_100 ?
1231                          100 : 10)),
1232                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1233                         "full" : "half"));
1234
1235                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1236                        "%s for RX.\n",
1237                        tp->dev->name,
1238                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1239                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1240         }
1241 }
1242
1243 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1244 {
1245         u32 new_tg3_flags = 0;
1246         u32 old_rx_mode = tp->rx_mode;
1247         u32 old_tx_mode = tp->tx_mode;
1248
1249         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1250
1251                 /* Convert 1000BaseX flow control bits to 1000BaseT
1252                  * bits before resolving flow control.
1253                  */
1254                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1255                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1256                                        ADVERTISE_PAUSE_ASYM);
1257                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1258
1259                         if (local_adv & ADVERTISE_1000XPAUSE)
1260                                 local_adv |= ADVERTISE_PAUSE_CAP;
1261                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1262                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1263                         if (remote_adv & LPA_1000XPAUSE)
1264                                 remote_adv |= LPA_PAUSE_CAP;
1265                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1266                                 remote_adv |= LPA_PAUSE_ASYM;
1267                 }
1268
1269                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1270                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1271                                 if (remote_adv & LPA_PAUSE_CAP)
1272                                         new_tg3_flags |=
1273                                                 (TG3_FLAG_RX_PAUSE |
1274                                                 TG3_FLAG_TX_PAUSE);
1275                                 else if (remote_adv & LPA_PAUSE_ASYM)
1276                                         new_tg3_flags |=
1277                                                 (TG3_FLAG_RX_PAUSE);
1278                         } else {
1279                                 if (remote_adv & LPA_PAUSE_CAP)
1280                                         new_tg3_flags |=
1281                                                 (TG3_FLAG_RX_PAUSE |
1282                                                 TG3_FLAG_TX_PAUSE);
1283                         }
1284                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1285                         if ((remote_adv & LPA_PAUSE_CAP) &&
1286                         (remote_adv & LPA_PAUSE_ASYM))
1287                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1288                 }
1289
1290                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1291                 tp->tg3_flags |= new_tg3_flags;
1292         } else {
1293                 new_tg3_flags = tp->tg3_flags;
1294         }
1295
1296         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1297                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1298         else
1299                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1300
1301         if (old_rx_mode != tp->rx_mode) {
1302                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1303         }
1304         
1305         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1306                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1307         else
1308                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1309
1310         if (old_tx_mode != tp->tx_mode) {
1311                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1312         }
1313 }
1314
1315 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1316 {
1317         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1318         case MII_TG3_AUX_STAT_10HALF:
1319                 *speed = SPEED_10;
1320                 *duplex = DUPLEX_HALF;
1321                 break;
1322
1323         case MII_TG3_AUX_STAT_10FULL:
1324                 *speed = SPEED_10;
1325                 *duplex = DUPLEX_FULL;
1326                 break;
1327
1328         case MII_TG3_AUX_STAT_100HALF:
1329                 *speed = SPEED_100;
1330                 *duplex = DUPLEX_HALF;
1331                 break;
1332
1333         case MII_TG3_AUX_STAT_100FULL:
1334                 *speed = SPEED_100;
1335                 *duplex = DUPLEX_FULL;
1336                 break;
1337
1338         case MII_TG3_AUX_STAT_1000HALF:
1339                 *speed = SPEED_1000;
1340                 *duplex = DUPLEX_HALF;
1341                 break;
1342
1343         case MII_TG3_AUX_STAT_1000FULL:
1344                 *speed = SPEED_1000;
1345                 *duplex = DUPLEX_FULL;
1346                 break;
1347
1348         default:
1349                 *speed = SPEED_INVALID;
1350                 *duplex = DUPLEX_INVALID;
1351                 break;
1352         };
1353 }
1354
1355 static void tg3_phy_copper_begin(struct tg3 *tp)
1356 {
1357         u32 new_adv;
1358         int i;
1359
1360         if (tp->link_config.phy_is_low_power) {
1361                 /* Entering low power mode.  Disable gigabit and
1362                  * 100baseT advertisements.
1363                  */
1364                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1365
1366                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1367                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1368                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1369                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1370
1371                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1372         } else if (tp->link_config.speed == SPEED_INVALID) {
1373                 tp->link_config.advertising =
1374                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1375                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1376                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1377                          ADVERTISED_Autoneg | ADVERTISED_MII);
1378
1379                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1380                         tp->link_config.advertising &=
1381                                 ~(ADVERTISED_1000baseT_Half |
1382                                   ADVERTISED_1000baseT_Full);
1383
1384                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1385                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1386                         new_adv |= ADVERTISE_10HALF;
1387                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1388                         new_adv |= ADVERTISE_10FULL;
1389                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1390                         new_adv |= ADVERTISE_100HALF;
1391                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1392                         new_adv |= ADVERTISE_100FULL;
1393                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1394
1395                 if (tp->link_config.advertising &
1396                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1397                         new_adv = 0;
1398                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1399                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1400                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1401                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1402                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1403                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1404                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1405                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1406                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1407                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1408                 } else {
1409                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1410                 }
1411         } else {
1412                 /* Asking for a specific link mode. */
1413                 if (tp->link_config.speed == SPEED_1000) {
1414                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1415                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1416
1417                         if (tp->link_config.duplex == DUPLEX_FULL)
1418                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1419                         else
1420                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1421                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1422                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1423                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1424                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1425                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1426                 } else {
1427                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1428
1429                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1430                         if (tp->link_config.speed == SPEED_100) {
1431                                 if (tp->link_config.duplex == DUPLEX_FULL)
1432                                         new_adv |= ADVERTISE_100FULL;
1433                                 else
1434                                         new_adv |= ADVERTISE_100HALF;
1435                         } else {
1436                                 if (tp->link_config.duplex == DUPLEX_FULL)
1437                                         new_adv |= ADVERTISE_10FULL;
1438                                 else
1439                                         new_adv |= ADVERTISE_10HALF;
1440                         }
1441                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1442                 }
1443         }
1444
1445         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1446             tp->link_config.speed != SPEED_INVALID) {
1447                 u32 bmcr, orig_bmcr;
1448
1449                 tp->link_config.active_speed = tp->link_config.speed;
1450                 tp->link_config.active_duplex = tp->link_config.duplex;
1451
1452                 bmcr = 0;
1453                 switch (tp->link_config.speed) {
1454                 default:
1455                 case SPEED_10:
1456                         break;
1457
1458                 case SPEED_100:
1459                         bmcr |= BMCR_SPEED100;
1460                         break;
1461
1462                 case SPEED_1000:
1463                         bmcr |= TG3_BMCR_SPEED1000;
1464                         break;
1465                 }
1466
1467                 if (tp->link_config.duplex == DUPLEX_FULL)
1468                         bmcr |= BMCR_FULLDPLX;
1469
1470                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1471                     (bmcr != orig_bmcr)) {
1472                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1473                         for (i = 0; i < 1500; i++) {
1474                                 u32 tmp;
1475
1476                                 udelay(10);
1477                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1478                                     tg3_readphy(tp, MII_BMSR, &tmp))
1479                                         continue;
1480                                 if (!(tmp & BMSR_LSTATUS)) {
1481                                         udelay(40);
1482                                         break;
1483                                 }
1484                         }
1485                         tg3_writephy(tp, MII_BMCR, bmcr);
1486                         udelay(40);
1487                 }
1488         } else {
1489                 tg3_writephy(tp, MII_BMCR,
1490                              BMCR_ANENABLE | BMCR_ANRESTART);
1491         }
1492 }
1493
1494 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1495 {
1496         int err;
1497
1498         /* Turn off tap power management. */
1499         /* Set Extended packet length bit */
1500         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1501
1502         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1503         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1504
1505         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1506         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1507
1508         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1509         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1510
1511         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1512         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1513
1514         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1515         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1516
1517         udelay(40);
1518
1519         return err;
1520 }
1521
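/* Return 1 if the PHY is currently advertising every speed/duplex
 * combination we support (10/100, plus 1000 unless the board is
 * 10/100-only), 0 otherwise.
 */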
1522 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1523 {
1524         u32 adv_reg, all_mask;
1525
1526         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1527                 return 0;
1528
1529         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1530                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1531         if ((adv_reg & all_mask) != all_mask)
1532                 return 0;
1533         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1534                 u32 tg3_ctrl;
1535
1536                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1537                         return 0;
1538
1539                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1540                             MII_TG3_CTRL_ADV_1000_FULL);
1541                 if ((tg3_ctrl & all_mask) != all_mask)
1542                         return 0;
1543         }
1544         return 1;
1545 }
1546
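/* Establish or re-check the link on a copper PHY: apply per-chip PHY
 * workarounds, determine the current speed/duplex, set up flow control
 * and the MAC port mode, and report any carrier change.
 */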
1547 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1548 {
1549         int current_link_up;
1550         u32 bmsr, dummy;
1551         u16 current_speed;
1552         u8 current_duplex;
1553         int i, err;
1554
1555         tw32(MAC_EVENT, 0);
1556
1557         tw32_f(MAC_STATUS,
1558              (MAC_STATUS_SYNC_CHANGED |
1559               MAC_STATUS_CFG_CHANGED |
1560               MAC_STATUS_MI_COMPLETION |
1561               MAC_STATUS_LNKSTATE_CHANGED));
1562         udelay(40);
1563
1564         tp->mi_mode = MAC_MI_MODE_BASE;
1565         tw32_f(MAC_MI_MODE, tp->mi_mode);
1566         udelay(80);
1567
1568         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1569
1570         /* Some third-party PHYs need to be reset on link going
1571          * down.
1572          */
1573         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1574              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1575              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1576             netif_carrier_ok(tp->dev)) {
1577                 tg3_readphy(tp, MII_BMSR, &bmsr);
1578                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1579                     !(bmsr & BMSR_LSTATUS))
1580                         force_reset = 1;
1581         }
1582         if (force_reset)
1583                 tg3_phy_reset(tp);
1584
1585         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1586                 tg3_readphy(tp, MII_BMSR, &bmsr);
1587                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1588                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1589                         bmsr = 0;
1590
1591                 if (!(bmsr & BMSR_LSTATUS)) {
1592                         err = tg3_init_5401phy_dsp(tp);
1593                         if (err)
1594                                 return err;
1595
1596                         tg3_readphy(tp, MII_BMSR, &bmsr);
1597                         for (i = 0; i < 1000; i++) {
1598                                 udelay(10);
1599                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1600                                     (bmsr & BMSR_LSTATUS)) {
1601                                         udelay(40);
1602                                         break;
1603                                 }
1604                         }
1605
1606                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1607                             !(bmsr & BMSR_LSTATUS) &&
1608                             tp->link_config.active_speed == SPEED_1000) {
1609                                 err = tg3_phy_reset(tp);
1610                                 if (!err)
1611                                         err = tg3_init_5401phy_dsp(tp);
1612                                 if (err)
1613                                         return err;
1614                         }
1615                 }
1616         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1617                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1618                 /* 5701 {A0,B0} CRC bug workaround */
1619                 tg3_writephy(tp, 0x15, 0x0a75);
1620                 tg3_writephy(tp, 0x1c, 0x8c68);
1621                 tg3_writephy(tp, 0x1c, 0x8d68);
1622                 tg3_writephy(tp, 0x1c, 0x8c68);
1623         }
1624
1625         /* Clear pending interrupts... */
1626         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1627         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1628
1629         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1630                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1631         else
1632                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1633
1634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1636                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1637                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1638                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1639                 else
1640                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1641         }
1642
1643         current_link_up = 0;
1644         current_speed = SPEED_INVALID;
1645         current_duplex = DUPLEX_INVALID;
1646
1647         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1648                 u32 val;
1649
1650                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1651                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1652                 if (!(val & (1 << 10))) {
1653                         val |= (1 << 10);
1654                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1655                         goto relink;
1656                 }
1657         }
1658
1659         bmsr = 0;
1660         for (i = 0; i < 100; i++) {
1661                 tg3_readphy(tp, MII_BMSR, &bmsr);
1662                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1663                     (bmsr & BMSR_LSTATUS))
1664                         break;
1665                 udelay(40);
1666         }
1667
1668         if (bmsr & BMSR_LSTATUS) {
1669                 u32 aux_stat, bmcr;
1670
1671                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1672                 for (i = 0; i < 2000; i++) {
1673                         udelay(10);
1674                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1675                             aux_stat)
1676                                 break;
1677                 }
1678
1679                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1680                                              &current_speed,
1681                                              &current_duplex);
1682
1683                 bmcr = 0;
1684                 for (i = 0; i < 200; i++) {
1685                         tg3_readphy(tp, MII_BMCR, &bmcr);
1686                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1687                                 continue;
1688                         if (bmcr && bmcr != 0x7fff)
1689                                 break;
1690                         udelay(10);
1691                 }
1692
1693                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1694                         if (bmcr & BMCR_ANENABLE) {
1695                                 current_link_up = 1;
1696
1697                                 /* Force autoneg restart if we are exiting
1698                                  * low power mode.
1699                                  */
1700                                 if (!tg3_copper_is_advertising_all(tp))
1701                                         current_link_up = 0;
1702                         } else {
1703                                 current_link_up = 0;
1704                         }
1705                 } else {
1706                         if (!(bmcr & BMCR_ANENABLE) &&
1707                             tp->link_config.speed == current_speed &&
1708                             tp->link_config.duplex == current_duplex) {
1709                                 current_link_up = 1;
1710                         } else {
1711                                 current_link_up = 0;
1712                         }
1713                 }
1714
1715                 tp->link_config.active_speed = current_speed;
1716                 tp->link_config.active_duplex = current_duplex;
1717         }
1718
1719         if (current_link_up == 1 &&
1720             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1721             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1722                 u32 local_adv, remote_adv;
1723
1724                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1725                         local_adv = 0;
1726                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1727
1728                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1729                         remote_adv = 0;
1730
1731                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1732
1733                 /* If we are not advertising full pause capability,
1734                  * something is wrong.  Bring the link down and reconfigure.
1735                  */
1736                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1737                         current_link_up = 0;
1738                 } else {
1739                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1740                 }
1741         }
1742 relink:
1743         if (current_link_up == 0) {
1744                 u32 tmp;
1745
1746                 tg3_phy_copper_begin(tp);
1747
1748                 tg3_readphy(tp, MII_BMSR, &tmp);
1749                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1750                     (tmp & BMSR_LSTATUS))
1751                         current_link_up = 1;
1752         }
1753
1754         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1755         if (current_link_up == 1) {
1756                 if (tp->link_config.active_speed == SPEED_100 ||
1757                     tp->link_config.active_speed == SPEED_10)
1758                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1759                 else
1760                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1761         } else
1762                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1763
1764         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1765         if (tp->link_config.active_duplex == DUPLEX_HALF)
1766                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1767
1768         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1770                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1771                     (current_link_up == 1 &&
1772                      tp->link_config.active_speed == SPEED_10))
1773                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1774         } else {
1775                 if (current_link_up == 1)
1776                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1777         }
1778
1779         /* ??? Without this setting Netgear GA302T PHY does not
1780          * ??? send/receive packets...
1781          */
1782         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1783             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1784                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1785                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1786                 udelay(80);
1787         }
1788
1789         tw32_f(MAC_MODE, tp->mac_mode);
1790         udelay(40);
1791
1792         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1793                 /* Polled via timer. */
1794                 tw32_f(MAC_EVENT, 0);
1795         } else {
1796                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1797         }
1798         udelay(40);
1799
1800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1801             current_link_up == 1 &&
1802             tp->link_config.active_speed == SPEED_1000 &&
1803             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1804              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1805                 udelay(120);
1806                 tw32_f(MAC_STATUS,
1807                      (MAC_STATUS_SYNC_CHANGED |
1808                       MAC_STATUS_CFG_CHANGED));
1809                 udelay(40);
1810                 tg3_write_mem(tp,
1811                               NIC_SRAM_FIRMWARE_MBOX,
1812                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1813         }
1814
1815         if (current_link_up != netif_carrier_ok(tp->dev)) {
1816                 if (current_link_up)
1817                         netif_carrier_on(tp->dev);
1818                 else
1819                         netif_carrier_off(tp->dev);
1820                 tg3_link_report(tp);
1821         }
1822
1823         return 0;
1824 }
1825
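/* Software state for 1000BASE-X autonegotiation, used when the SerDes
 * link must be negotiated by hand.  The states below mirror the
 * IEEE 802.3 clause 37 arbitration state diagram.
 */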
1826 struct tg3_fiber_aneginfo {
1827         int state;
1828 #define ANEG_STATE_UNKNOWN              0
1829 #define ANEG_STATE_AN_ENABLE            1
1830 #define ANEG_STATE_RESTART_INIT         2
1831 #define ANEG_STATE_RESTART              3
1832 #define ANEG_STATE_DISABLE_LINK_OK      4
1833 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1834 #define ANEG_STATE_ABILITY_DETECT       6
1835 #define ANEG_STATE_ACK_DETECT_INIT      7
1836 #define ANEG_STATE_ACK_DETECT           8
1837 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1838 #define ANEG_STATE_COMPLETE_ACK         10
1839 #define ANEG_STATE_IDLE_DETECT_INIT     11
1840 #define ANEG_STATE_IDLE_DETECT          12
1841 #define ANEG_STATE_LINK_OK              13
1842 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1843 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1844
1845         u32 flags;
1846 #define MR_AN_ENABLE            0x00000001
1847 #define MR_RESTART_AN           0x00000002
1848 #define MR_AN_COMPLETE          0x00000004
1849 #define MR_PAGE_RX              0x00000008
1850 #define MR_NP_LOADED            0x00000010
1851 #define MR_TOGGLE_TX            0x00000020
1852 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1853 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1854 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1855 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1856 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1857 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1858 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1859 #define MR_TOGGLE_RX            0x00002000
1860 #define MR_NP_RX                0x00004000
1861
1862 #define MR_LINK_OK              0x80000000
1863
1864         unsigned long link_time, cur_time;
1865
1866         u32 ability_match_cfg;
1867         int ability_match_count;
1868
1869         char ability_match, idle_match, ack_match;
1870
1871         u32 txconfig, rxconfig;
1872 #define ANEG_CFG_NP             0x00000080
1873 #define ANEG_CFG_ACK            0x00000040
1874 #define ANEG_CFG_RF2            0x00000020
1875 #define ANEG_CFG_RF1            0x00000010
1876 #define ANEG_CFG_PS2            0x00000001
1877 #define ANEG_CFG_PS1            0x00008000
1878 #define ANEG_CFG_HD             0x00004000
1879 #define ANEG_CFG_FD             0x00002000
1880 #define ANEG_CFG_INVAL          0x00001f06
1881
1882 };
1883 #define ANEG_OK         0
1884 #define ANEG_DONE       1
1885 #define ANEG_TIMER_ENAB 2
1886 #define ANEG_FAILED     -1
1887
1888 #define ANEG_STATE_SETTLE_TIME  10000
1889
1890 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1891                                    struct tg3_fiber_aneginfo *ap)
1892 {
1893         unsigned long delta;
1894         u32 rx_cfg_reg;
1895         int ret;
1896
1897         if (ap->state == ANEG_STATE_UNKNOWN) {
1898                 ap->rxconfig = 0;
1899                 ap->link_time = 0;
1900                 ap->cur_time = 0;
1901                 ap->ability_match_cfg = 0;
1902                 ap->ability_match_count = 0;
1903                 ap->ability_match = 0;
1904                 ap->idle_match = 0;
1905                 ap->ack_match = 0;
1906         }
1907         ap->cur_time++;
1908
1909         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1910                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1911
1912                 if (rx_cfg_reg != ap->ability_match_cfg) {
1913                         ap->ability_match_cfg = rx_cfg_reg;
1914                         ap->ability_match = 0;
1915                         ap->ability_match_count = 0;
1916                 } else {
1917                         if (++ap->ability_match_count > 1) {
1918                                 ap->ability_match = 1;
1919                                 ap->ability_match_cfg = rx_cfg_reg;
1920                         }
1921                 }
1922                 if (rx_cfg_reg & ANEG_CFG_ACK)
1923                         ap->ack_match = 1;
1924                 else
1925                         ap->ack_match = 0;
1926
1927                 ap->idle_match = 0;
1928         } else {
1929                 ap->idle_match = 1;
1930                 ap->ability_match_cfg = 0;
1931                 ap->ability_match_count = 0;
1932                 ap->ability_match = 0;
1933                 ap->ack_match = 0;
1934
1935                 rx_cfg_reg = 0;
1936         }
1937
1938         ap->rxconfig = rx_cfg_reg;
1939         ret = ANEG_OK;
1940
1941         switch(ap->state) {
1942         case ANEG_STATE_UNKNOWN:
1943                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1944                         ap->state = ANEG_STATE_AN_ENABLE;
1945
1946                 /* fallthru */
1947         case ANEG_STATE_AN_ENABLE:
1948                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1949                 if (ap->flags & MR_AN_ENABLE) {
1950                         ap->link_time = 0;
1951                         ap->cur_time = 0;
1952                         ap->ability_match_cfg = 0;
1953                         ap->ability_match_count = 0;
1954                         ap->ability_match = 0;
1955                         ap->idle_match = 0;
1956                         ap->ack_match = 0;
1957
1958                         ap->state = ANEG_STATE_RESTART_INIT;
1959                 } else {
1960                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1961                 }
1962                 break;
1963
1964         case ANEG_STATE_RESTART_INIT:
1965                 ap->link_time = ap->cur_time;
1966                 ap->flags &= ~(MR_NP_LOADED);
1967                 ap->txconfig = 0;
1968                 tw32(MAC_TX_AUTO_NEG, 0);
1969                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1970                 tw32_f(MAC_MODE, tp->mac_mode);
1971                 udelay(40);
1972
1973                 ret = ANEG_TIMER_ENAB;
1974                 ap->state = ANEG_STATE_RESTART;
1975
1976                 /* fallthru */
1977         case ANEG_STATE_RESTART:
1978                 delta = ap->cur_time - ap->link_time;
1979                 if (delta > ANEG_STATE_SETTLE_TIME) {
1980                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1981                 } else {
1982                         ret = ANEG_TIMER_ENAB;
1983                 }
1984                 break;
1985
1986         case ANEG_STATE_DISABLE_LINK_OK:
1987                 ret = ANEG_DONE;
1988                 break;
1989
1990         case ANEG_STATE_ABILITY_DETECT_INIT:
1991                 ap->flags &= ~(MR_TOGGLE_TX);
1992                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1993                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1994                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1995                 tw32_f(MAC_MODE, tp->mac_mode);
1996                 udelay(40);
1997
1998                 ap->state = ANEG_STATE_ABILITY_DETECT;
1999                 break;
2000
2001         case ANEG_STATE_ABILITY_DETECT:
2002                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2003                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2004                 }
2005                 break;
2006
2007         case ANEG_STATE_ACK_DETECT_INIT:
2008                 ap->txconfig |= ANEG_CFG_ACK;
2009                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2010                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2011                 tw32_f(MAC_MODE, tp->mac_mode);
2012                 udelay(40);
2013
2014                 ap->state = ANEG_STATE_ACK_DETECT;
2015
2016                 /* fallthru */
2017         case ANEG_STATE_ACK_DETECT:
2018                 if (ap->ack_match != 0) {
2019                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2020                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2021                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2022                         } else {
2023                                 ap->state = ANEG_STATE_AN_ENABLE;
2024                         }
2025                 } else if (ap->ability_match != 0 &&
2026                            ap->rxconfig == 0) {
2027                         ap->state = ANEG_STATE_AN_ENABLE;
2028                 }
2029                 break;
2030
2031         case ANEG_STATE_COMPLETE_ACK_INIT:
2032                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2033                         ret = ANEG_FAILED;
2034                         break;
2035                 }
2036                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2037                                MR_LP_ADV_HALF_DUPLEX |
2038                                MR_LP_ADV_SYM_PAUSE |
2039                                MR_LP_ADV_ASYM_PAUSE |
2040                                MR_LP_ADV_REMOTE_FAULT1 |
2041                                MR_LP_ADV_REMOTE_FAULT2 |
2042                                MR_LP_ADV_NEXT_PAGE |
2043                                MR_TOGGLE_RX |
2044                                MR_NP_RX);
2045                 if (ap->rxconfig & ANEG_CFG_FD)
2046                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2047                 if (ap->rxconfig & ANEG_CFG_HD)
2048                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2049                 if (ap->rxconfig & ANEG_CFG_PS1)
2050                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2051                 if (ap->rxconfig & ANEG_CFG_PS2)
2052                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2053                 if (ap->rxconfig & ANEG_CFG_RF1)
2054                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2055                 if (ap->rxconfig & ANEG_CFG_RF2)
2056                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2057                 if (ap->rxconfig & ANEG_CFG_NP)
2058                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2059
2060                 ap->link_time = ap->cur_time;
2061
2062                 ap->flags ^= (MR_TOGGLE_TX);
2063                 if (ap->rxconfig & 0x0008)
2064                         ap->flags |= MR_TOGGLE_RX;
2065                 if (ap->rxconfig & ANEG_CFG_NP)
2066                         ap->flags |= MR_NP_RX;
2067                 ap->flags |= MR_PAGE_RX;
2068
2069                 ap->state = ANEG_STATE_COMPLETE_ACK;
2070                 ret = ANEG_TIMER_ENAB;
2071                 break;
2072
2073         case ANEG_STATE_COMPLETE_ACK:
2074                 if (ap->ability_match != 0 &&
2075                     ap->rxconfig == 0) {
2076                         ap->state = ANEG_STATE_AN_ENABLE;
2077                         break;
2078                 }
2079                 delta = ap->cur_time - ap->link_time;
2080                 if (delta > ANEG_STATE_SETTLE_TIME) {
2081                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2082                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2083                         } else {
2084                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2085                                     !(ap->flags & MR_NP_RX)) {
2086                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2087                                 } else {
2088                                         ret = ANEG_FAILED;
2089                                 }
2090                         }
2091                 }
2092                 break;
2093
2094         case ANEG_STATE_IDLE_DETECT_INIT:
2095                 ap->link_time = ap->cur_time;
2096                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2097                 tw32_f(MAC_MODE, tp->mac_mode);
2098                 udelay(40);
2099
2100                 ap->state = ANEG_STATE_IDLE_DETECT;
2101                 ret = ANEG_TIMER_ENAB;
2102                 break;
2103
2104         case ANEG_STATE_IDLE_DETECT:
2105                 if (ap->ability_match != 0 &&
2106                     ap->rxconfig == 0) {
2107                         ap->state = ANEG_STATE_AN_ENABLE;
2108                         break;
2109                 }
2110                 delta = ap->cur_time - ap->link_time;
2111                 if (delta > ANEG_STATE_SETTLE_TIME) {
2112                         /* XXX another gem from the Broadcom driver :( */
2113                         ap->state = ANEG_STATE_LINK_OK;
2114                 }
2115                 break;
2116
2117         case ANEG_STATE_LINK_OK:
2118                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2119                 ret = ANEG_DONE;
2120                 break;
2121
2122         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2123                 /* ??? unimplemented */
2124                 break;
2125
2126         case ANEG_STATE_NEXT_PAGE_WAIT:
2127                 /* ??? unimplemented */
2128                 break;
2129
2130         default:
2131                 ret = ANEG_FAILED;
2132                 break;
2133         }
2134
2135         return ret;
2136 }
2137
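/* Drive the software autoneg state machine to completion (bounded at
 * roughly 195 ms) and return nonzero if negotiation succeeded.
 */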
2138 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2139 {
2140         int res = 0;
2141         struct tg3_fiber_aneginfo aninfo;
2142         int status = ANEG_FAILED;
2143         unsigned int tick;
2144         u32 tmp;
2145
2146         tw32_f(MAC_TX_AUTO_NEG, 0);
2147
2148         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2149         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2150         udelay(40);
2151
2152         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2153         udelay(40);
2154
2155         memset(&aninfo, 0, sizeof(aninfo));
2156         aninfo.flags |= MR_AN_ENABLE;
2157         aninfo.state = ANEG_STATE_UNKNOWN;
2158         aninfo.cur_time = 0;
2159         tick = 0;
2160         while (++tick < 195000) {
2161                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2162                 if (status == ANEG_DONE || status == ANEG_FAILED)
2163                         break;
2164
2165                 udelay(1);
2166         }
2167
2168         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2169         tw32_f(MAC_MODE, tp->mac_mode);
2170         udelay(40);
2171
2172         *flags = aninfo.flags;
2173
2174         if (status == ANEG_DONE &&
2175             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2176                              MR_LP_ADV_FULL_DUPLEX)))
2177                 res = 1;
2178
2179         return res;
2180 }
2181
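/* Bring-up sequence for the BCM8002 SerDes PHY, done with raw register
 * writes: PLL lock range, software reset, auto-lock/comdet setup and a
 * POR toggle, then deselect the channel register so the PHY ID can be
 * read later.
 */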
2182 static void tg3_init_bcm8002(struct tg3 *tp)
2183 {
2184         u32 mac_status = tr32(MAC_STATUS);
2185         int i;
2186
2187         /* Reset when initializing for the first time or when we have a link. */
2188         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2189             !(mac_status & MAC_STATUS_PCS_SYNCED))
2190                 return;
2191
2192         /* Set PLL lock range. */
2193         tg3_writephy(tp, 0x16, 0x8007);
2194
2195         /* SW reset */
2196         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2197
2198         /* Wait for reset to complete. */
2199         /* XXX schedule_timeout() ... */
2200         for (i = 0; i < 500; i++)
2201                 udelay(10);
2202
2203         /* Config mode; select PMA/Ch 1 regs. */
2204         tg3_writephy(tp, 0x10, 0x8411);
2205
2206         /* Enable auto-lock and comdet, select txclk for tx. */
2207         tg3_writephy(tp, 0x11, 0x0a10);
2208
2209         tg3_writephy(tp, 0x18, 0x00a0);
2210         tg3_writephy(tp, 0x16, 0x41ff);
2211
2212         /* Assert and deassert POR. */
2213         tg3_writephy(tp, 0x13, 0x0400);
2214         udelay(40);
2215         tg3_writephy(tp, 0x13, 0x0000);
2216
2217         tg3_writephy(tp, 0x11, 0x0a50);
2218         udelay(40);
2219         tg3_writephy(tp, 0x11, 0x0a10);
2220
2221         /* Wait for signal to stabilize */
2222         /* XXX schedule_timeout() ... */
2223         for (i = 0; i < 15000; i++)
2224                 udelay(10);
2225
2226         /* Deselect the channel register so we can read the PHYID
2227          * later.
2228          */
2229         tg3_writephy(tp, 0x10, 0x8011);
2230 }
2231
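/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Applies a MAC_SERDES_CFG workaround on the chips that need it and
 * returns nonzero if the link is up.
 */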
2232 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2233 {
2234         u32 sg_dig_ctrl, sg_dig_status;
2235         u32 serdes_cfg, expected_sg_dig_ctrl;
2236         int workaround, port_a;
2237         int current_link_up;
2238
2239         serdes_cfg = 0;
2240         expected_sg_dig_ctrl = 0;
2241         workaround = 0;
2242         port_a = 1;
2243         current_link_up = 0;
2244
2245         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2246             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2247                 workaround = 1;
2248                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2249                         port_a = 0;
2250
2251                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2252                 /* preserve bits 20-23 for voltage regulator */
2253                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2254         }
2255
2256         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2257
2258         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2259                 if (sg_dig_ctrl & (1 << 31)) {
2260                         if (workaround) {
2261                                 u32 val = serdes_cfg;
2262
2263                                 if (port_a)
2264                                         val |= 0xc010000;
2265                                 else
2266                                         val |= 0x4010000;
2267                                 tw32_f(MAC_SERDES_CFG, val);
2268                         }
2269                         tw32_f(SG_DIG_CTRL, 0x01388400);
2270                 }
2271                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2272                         tg3_setup_flow_control(tp, 0, 0);
2273                         current_link_up = 1;
2274                 }
2275                 goto out;
2276         }
2277
2278         /* Want auto-negotiation.  */
2279         expected_sg_dig_ctrl = 0x81388400;
2280
2281         /* Pause capability */
2282         expected_sg_dig_ctrl |= (1 << 11);
2283
2284         /* Asymmetric pause */
2285         expected_sg_dig_ctrl |= (1 << 12);
2286
2287         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2288                 if (workaround)
2289                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2290                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2291                 udelay(5);
2292                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2293
2294                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2295         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2296                                  MAC_STATUS_SIGNAL_DET)) {
2297                 int i;
2298
2299                 /* Give it time to negotiate (~200ms) */
2300                 for (i = 0; i < 40000; i++) {
2301                         sg_dig_status = tr32(SG_DIG_STATUS);
2302                         if (sg_dig_status & (0x3))
2303                                 break;
2304                         udelay(5);
2305                 }
2306                 mac_status = tr32(MAC_STATUS);
2307
2308                 if ((sg_dig_status & (1 << 1)) &&
2309                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2310                         u32 local_adv, remote_adv;
2311
2312                         local_adv = ADVERTISE_PAUSE_CAP;
2313                         remote_adv = 0;
2314                         if (sg_dig_status & (1 << 19))
2315                                 remote_adv |= LPA_PAUSE_CAP;
2316                         if (sg_dig_status & (1 << 20))
2317                                 remote_adv |= LPA_PAUSE_ASYM;
2318
2319                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2320                         current_link_up = 1;
2321                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2322                 } else if (!(sg_dig_status & (1 << 1))) {
2323                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2324                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2325                         else {
2326                                 if (workaround) {
2327                                         u32 val = serdes_cfg;
2328
2329                                         if (port_a)
2330                                                 val |= 0xc010000;
2331                                         else
2332                                                 val |= 0x4010000;
2333
2334                                         tw32_f(MAC_SERDES_CFG, val);
2335                                 }
2336
2337                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2338                                 udelay(40);
2339
2340                                 /* Parallel detection: the link is up
2341                                  * only if we have PCS_SYNC and are not
2342                                  * receiving config code words. */
2343                                 mac_status = tr32(MAC_STATUS);
2344                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2345                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2346                                         tg3_setup_flow_control(tp, 0, 0);
2347                                         current_link_up = 1;
2348                                 }
2349                         }
2350                 }
2351         }
2352
2353 out:
2354         return current_link_up;
2355 }
2356
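/* Fiber link setup without SG_DIG hardware autoneg: run the software
 * autoneg state machine (or force 1000-FD when autoneg is disabled)
 * and fall back to parallel detection if the peer sends no config
 * code words.  Returns nonzero if the link is up.
 */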
2357 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2358 {
2359         int current_link_up = 0;
2360
2361         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2362                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2363                 goto out;
2364         }
2365
2366         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2367                 u32 flags;
2368                 int i;
2369
2370                 if (fiber_autoneg(tp, &flags)) {
2371                         u32 local_adv, remote_adv;
2372
2373                         local_adv = ADVERTISE_PAUSE_CAP;
2374                         remote_adv = 0;
2375                         if (flags & MR_LP_ADV_SYM_PAUSE)
2376                                 remote_adv |= LPA_PAUSE_CAP;
2377                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2378                                 remote_adv |= LPA_PAUSE_ASYM;
2379
2380                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2381
2382                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2383                         current_link_up = 1;
2384                 }
2385                 for (i = 0; i < 30; i++) {
2386                         udelay(20);
2387                         tw32_f(MAC_STATUS,
2388                                (MAC_STATUS_SYNC_CHANGED |
2389                                 MAC_STATUS_CFG_CHANGED));
2390                         udelay(40);
2391                         if ((tr32(MAC_STATUS) &
2392                              (MAC_STATUS_SYNC_CHANGED |
2393                               MAC_STATUS_CFG_CHANGED)) == 0)
2394                                 break;
2395                 }
2396
2397                 mac_status = tr32(MAC_STATUS);
2398                 if (current_link_up == 0 &&
2399                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2400                     !(mac_status & MAC_STATUS_RCVD_CFG))
2401                         current_link_up = 1;
2402         } else {
2403                 /* Forcing 1000FD link up. */
2404                 current_link_up = 1;
2405                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2406
2407                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2408                 udelay(40);
2409         }
2410
2411 out:
2412         return current_link_up;
2413 }
2414
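/* Top-level link setup for TBI/fiber ports: choose hardware or by-hand
 * autonegotiation, update the MAC mode and link LED, and report carrier
 * or flow-control changes.
 */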
2415 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2416 {
2417         u32 orig_pause_cfg;
2418         u16 orig_active_speed;
2419         u8 orig_active_duplex;
2420         u32 mac_status;
2421         int current_link_up;
2422         int i;
2423
2424         orig_pause_cfg =
2425                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2426                                   TG3_FLAG_TX_PAUSE));
2427         orig_active_speed = tp->link_config.active_speed;
2428         orig_active_duplex = tp->link_config.active_duplex;
2429
2430         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2431             netif_carrier_ok(tp->dev) &&
2432             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2433                 mac_status = tr32(MAC_STATUS);
2434                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2435                                MAC_STATUS_SIGNAL_DET |
2436                                MAC_STATUS_CFG_CHANGED |
2437                                MAC_STATUS_RCVD_CFG);
2438                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2439                                    MAC_STATUS_SIGNAL_DET)) {
2440                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2441                                             MAC_STATUS_CFG_CHANGED));
2442                         return 0;
2443                 }
2444         }
2445
2446         tw32_f(MAC_TX_AUTO_NEG, 0);
2447
2448         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2449         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2450         tw32_f(MAC_MODE, tp->mac_mode);
2451         udelay(40);
2452
2453         if (tp->phy_id == PHY_ID_BCM8002)
2454                 tg3_init_bcm8002(tp);
2455
2456         /* Enable link change event even when serdes polling.  */
2457         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2458         udelay(40);
2459
2460         current_link_up = 0;
2461         mac_status = tr32(MAC_STATUS);
2462
2463         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2464                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2465         else
2466                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2467
2468         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2469         tw32_f(MAC_MODE, tp->mac_mode);
2470         udelay(40);
2471
2472         tp->hw_status->status =
2473                 (SD_STATUS_UPDATED |
2474                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2475
2476         for (i = 0; i < 100; i++) {
2477                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2478                                     MAC_STATUS_CFG_CHANGED));
2479                 udelay(5);
2480                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2481                                          MAC_STATUS_CFG_CHANGED)) == 0)
2482                         break;
2483         }
2484
2485         mac_status = tr32(MAC_STATUS);
2486         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2487                 current_link_up = 0;
2488                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2489                         tw32_f(MAC_MODE, (tp->mac_mode |
2490                                           MAC_MODE_SEND_CONFIGS));
2491                         udelay(1);
2492                         tw32_f(MAC_MODE, tp->mac_mode);
2493                 }
2494         }
2495
2496         if (current_link_up == 1) {
2497                 tp->link_config.active_speed = SPEED_1000;
2498                 tp->link_config.active_duplex = DUPLEX_FULL;
2499                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2500                                     LED_CTRL_LNKLED_OVERRIDE |
2501                                     LED_CTRL_1000MBPS_ON));
2502         } else {
2503                 tp->link_config.active_speed = SPEED_INVALID;
2504                 tp->link_config.active_duplex = DUPLEX_INVALID;
2505                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2506                                     LED_CTRL_LNKLED_OVERRIDE |
2507                                     LED_CTRL_TRAFFIC_OVERRIDE));
2508         }
2509
2510         if (current_link_up != netif_carrier_ok(tp->dev)) {
2511                 if (current_link_up)
2512                         netif_carrier_on(tp->dev);
2513                 else
2514                         netif_carrier_off(tp->dev);
2515                 tg3_link_report(tp);
2516         } else {
2517                 u32 now_pause_cfg =
2518                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2519                                          TG3_FLAG_TX_PAUSE);
2520                 if (orig_pause_cfg != now_pause_cfg ||
2521                     orig_active_speed != tp->link_config.active_speed ||
2522                     orig_active_duplex != tp->link_config.active_duplex)
2523                         tg3_link_report(tp);
2524         }
2525
2526         return 0;
2527 }
2528
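/* Link setup for SerDes devices that are programmed through MII-style
 * 1000BASE-X registers rather than the SG_DIG block.
 */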
2529 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2530 {
2531         int current_link_up, err = 0;
2532         u32 bmsr, bmcr;
2533         u16 current_speed;
2534         u8 current_duplex;
2535
2536         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2537         tw32_f(MAC_MODE, tp->mac_mode);
2538         udelay(40);
2539
2540         tw32(MAC_EVENT, 0);
2541
2542         tw32_f(MAC_STATUS,
2543              (MAC_STATUS_SYNC_CHANGED |
2544               MAC_STATUS_CFG_CHANGED |
2545               MAC_STATUS_MI_COMPLETION |
2546               MAC_STATUS_LNKSTATE_CHANGED));
2547         udelay(40);
2548
2549         if (force_reset)
2550                 tg3_phy_reset(tp);
2551
2552         current_link_up = 0;
2553         current_speed = SPEED_INVALID;
2554         current_duplex = DUPLEX_INVALID;
2555
2556         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2557         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2558
2559         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2560
2561         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2562             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2563                 /* do nothing, just check for link up at the end */
2564         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2565                 u32 adv, new_adv;
2566
2567                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2568                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2569                                   ADVERTISE_1000XPAUSE |
2570                                   ADVERTISE_1000XPSE_ASYM |
2571                                   ADVERTISE_SLCT);
2572
2573                 /* Always advertise symmetric PAUSE just like copper */
2574                 new_adv |= ADVERTISE_1000XPAUSE;
2575
2576                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2577                         new_adv |= ADVERTISE_1000XHALF;
2578                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2579                         new_adv |= ADVERTISE_1000XFULL;
2580
2581                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2582                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2583                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2584                         tg3_writephy(tp, MII_BMCR, bmcr);
2585
2586                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2587                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2588                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2589
2590                         return err;
2591                 }
2592         } else {
2593                 u32 new_bmcr;
2594
2595                 bmcr &= ~BMCR_SPEED1000;
2596                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2597
2598                 if (tp->link_config.duplex == DUPLEX_FULL)
2599                         new_bmcr |= BMCR_FULLDPLX;
2600
2601                 if (new_bmcr != bmcr) {
2602                         /* BMCR_SPEED1000 is a reserved bit that needs
2603                          * to be set on write.
2604                          */
2605                         new_bmcr |= BMCR_SPEED1000;
2606
2607                         /* Force a linkdown */
2608                         if (netif_carrier_ok(tp->dev)) {
2609                                 u32 adv;
2610
2611                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2612                                 adv &= ~(ADVERTISE_1000XFULL |
2613                                          ADVERTISE_1000XHALF |
2614                                          ADVERTISE_SLCT);
2615                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2616                                 tg3_writephy(tp, MII_BMCR, bmcr |
2617                                                            BMCR_ANRESTART |
2618                                                            BMCR_ANENABLE);
2619                                 udelay(10);
2620                                 netif_carrier_off(tp->dev);
2621                         }
2622                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2623                         bmcr = new_bmcr;
2624                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2625                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2626                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2627                 }
2628         }
2629
2630         if (bmsr & BMSR_LSTATUS) {
2631                 current_speed = SPEED_1000;
2632                 current_link_up = 1;
2633                 if (bmcr & BMCR_FULLDPLX)
2634                         current_duplex = DUPLEX_FULL;
2635                 else
2636                         current_duplex = DUPLEX_HALF;
2637
2638                 if (bmcr & BMCR_ANENABLE) {
2639                         u32 local_adv, remote_adv, common;
2640
2641                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2642                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2643                         common = local_adv & remote_adv;
2644                         if (common & (ADVERTISE_1000XHALF |
2645                                       ADVERTISE_1000XFULL)) {
2646                                 if (common & ADVERTISE_1000XFULL)
2647                                         current_duplex = DUPLEX_FULL;
2648                                 else
2649                                         current_duplex = DUPLEX_HALF;
2650
2651                                 tg3_setup_flow_control(tp, local_adv,
2652                                                        remote_adv);
2653                         }
2654                         else
2655                                 current_link_up = 0;
2656                 }
2657         }
2658
2659         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2660         if (tp->link_config.active_duplex == DUPLEX_HALF)
2661                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2662
2663         tw32_f(MAC_MODE, tp->mac_mode);
2664         udelay(40);
2665
2666         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2667
2668         tp->link_config.active_speed = current_speed;
2669         tp->link_config.active_duplex = current_duplex;
2670
2671         if (current_link_up != netif_carrier_ok(tp->dev)) {
2672                 if (current_link_up)
2673                         netif_carrier_on(tp->dev);
2674                 else {
2675                         netif_carrier_off(tp->dev);
2676                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2677                 }
2678                 tg3_link_report(tp);
2679         }
2680         return err;
2681 }
2682
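/* Monitor an MII SerDes port and switch between parallel detection and
 * autonegotiation, depending on whether the link partner is sending
 * config code words.
 */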
2683 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2684 {
2685         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2686                 /* Give autoneg time to complete. */
2687                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2688                 return;
2689         }
2690         if (!netif_carrier_ok(tp->dev) &&
2691             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2692                 u32 bmcr;
2693
2694                 tg3_readphy(tp, MII_BMCR, &bmcr);
2695                 if (bmcr & BMCR_ANENABLE) {
2696                         u32 phy1, phy2;
2697
2698                         /* Select shadow register 0x1f */
2699                         tg3_writephy(tp, 0x1c, 0x7c00);
2700                         tg3_readphy(tp, 0x1c, &phy1);
2701
2702                         /* Select expansion interrupt status register */
2703                         tg3_writephy(tp, 0x17, 0x0f01);
2704                         tg3_readphy(tp, 0x15, &phy2);
2705                         tg3_readphy(tp, 0x15, &phy2);
2706
2707                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2708                                 /* We have signal detect and not receiving
2709                                  * config code words, link is up by parallel
2710                                  * detection.
2711                                  */
2712
2713                                 bmcr &= ~BMCR_ANENABLE;
2714                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2715                                 tg3_writephy(tp, MII_BMCR, bmcr);
2716                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2717                         }
2718                 }
2719         }
2720         else if (netif_carrier_ok(tp->dev) &&
2721                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2722                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2723                 u32 phy2;
2724
2725                 /* Select expansion interrupt status register */
2726                 tg3_writephy(tp, 0x17, 0x0f01);
2727                 tg3_readphy(tp, 0x15, &phy2);
2728                 if (phy2 & 0x20) {
2729                         u32 bmcr;
2730
2731                         /* Config code words received, turn on autoneg. */
2732                         tg3_readphy(tp, MII_BMCR, &bmcr);
2733                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2734
2735                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2736
2737                 }
2738         }
2739 }
2740
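/* Dispatch to the PHY-type specific setup routine, then adjust the
 * transmit slot time/IPG for half-duplex gigabit and the statistics
 * coalescing ticks for the resulting link state.
 */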
2741 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2742 {
2743         int err;
2744
2745         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2746                 err = tg3_setup_fiber_phy(tp, force_reset);
2747         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2748                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2749         } else {
2750                 err = tg3_setup_copper_phy(tp, force_reset);
2751         }
2752
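     /* Program the MAC inter-packet gap and slot time.  Gigabit half
      * duplex needs the extended slot time used for carrier extension;
      * all other speed/duplex combinations use the standard slot time.
      */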
2753         if (tp->link_config.active_speed == SPEED_1000 &&
2754             tp->link_config.active_duplex == DUPLEX_HALF)
2755                 tw32(MAC_TX_LENGTHS,
2756                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2757                       (6 << TX_LENGTHS_IPG_SHIFT) |
2758                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2759         else
2760                 tw32(MAC_TX_LENGTHS,
2761                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2762                       (6 << TX_LENGTHS_IPG_SHIFT) |
2763                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2764
2765         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2766                 if (netif_carrier_ok(tp->dev)) {
2767                         tw32(HOSTCC_STAT_COAL_TICKS,
2768                              tp->coal.stats_block_coalesce_usecs);
2769                 } else {
2770                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2771                 }
2772         }
2773
2774         return err;
2775 }
2776
2777 /* Tigon3 never reports partial packet sends.  So we do not
2778  * need special logic to handle SKBs that have not had all
2779  * of their frags sent yet, like SunGEM does.
2780  */
2781 static void tg3_tx(struct tg3 *tp)
2782 {
2783         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2784         u32 sw_idx = tp->tx_cons;
2785
2786         while (sw_idx != hw_idx) {
2787                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2788                 struct sk_buff *skb = ri->skb;
2789                 int i;
2790
2791                 if (unlikely(skb == NULL))
2792                         BUG();
2793
2794                 pci_unmap_single(tp->pdev,
2795                                  pci_unmap_addr(ri, mapping),
2796                                  skb_headlen(skb),
2797                                  PCI_DMA_TODEVICE);
2798
2799                 ri->skb = NULL;
2800
2801                 sw_idx = NEXT_TX(sw_idx);
2802
2803                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2804                         if (unlikely(sw_idx == hw_idx))
2805                                 BUG();
2806
2807                         ri = &tp->tx_buffers[sw_idx];
2808                         if (unlikely(ri->skb != NULL))
2809                                 BUG();
2810
2811                         pci_unmap_page(tp->pdev,
2812                                        pci_unmap_addr(ri, mapping),
2813                                        skb_shinfo(skb)->frags[i].size,
2814                                        PCI_DMA_TODEVICE);
2815
2816                         sw_idx = NEXT_TX(sw_idx);
2817                 }
2818
2819                 dev_kfree_skb(skb);
2820         }
2821
2822         tp->tx_cons = sw_idx;
2823
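     /* If the queue was stopped because the TX ring filled up, wake it
      * now that enough descriptors have been reclaimed.
      */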
2824         if (netif_queue_stopped(tp->dev) &&
2825             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2826                 netif_wake_queue(tp->dev);
2827 }
2828
2829 /* Returns size of skb allocated or < 0 on error.
2830  *
2831  * We only need to fill in the address because the other members
2832  * of the RX descriptor are invariant, see tg3_init_rings.
2833  *
2834  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2835  * posting buffers we only dirty the first cache line of the RX
2836  * descriptor (containing the address).  Whereas for the RX status
2837  * buffers the cpu only reads the last cacheline of the RX descriptor
2838  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2839  */
2840 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2841                             int src_idx, u32 dest_idx_unmasked)
2842 {
2843         struct tg3_rx_buffer_desc *desc;
2844         struct ring_info *map, *src_map;
2845         struct sk_buff *skb;
2846         dma_addr_t mapping;
2847         int skb_size, dest_idx;
2848
2849         src_map = NULL;
2850         switch (opaque_key) {
2851         case RXD_OPAQUE_RING_STD:
2852                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2853                 desc = &tp->rx_std[dest_idx];
2854                 map = &tp->rx_std_buffers[dest_idx];
2855                 if (src_idx >= 0)
2856                         src_map = &tp->rx_std_buffers[src_idx];
2857                 skb_size = tp->rx_pkt_buf_sz;
2858                 break;
2859
2860         case RXD_OPAQUE_RING_JUMBO:
2861                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2862                 desc = &tp->rx_jumbo[dest_idx];
2863                 map = &tp->rx_jumbo_buffers[dest_idx];
2864                 if (src_idx >= 0)
2865                         src_map = &tp->rx_jumbo_buffers[src_idx];
2866                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2867                 break;
2868
2869         default:
2870                 return -EINVAL;
2871         }
2872
2873         /* Do not overwrite any of the map or rp information
2874          * until we are sure we can commit to a new buffer.
2875          *
2876          * Callers depend upon this behavior and assume that
2877          * we leave everything unchanged if we fail.
2878          */
2879         skb = dev_alloc_skb(skb_size);
2880         if (skb == NULL)
2881                 return -ENOMEM;
2882
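     /* tp->rx_offset is normally 2 so that the IP header following the
      * 14-byte Ethernet header ends up 4-byte aligned.
      */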
2883         skb->dev = tp->dev;
2884         skb_reserve(skb, tp->rx_offset);
2885
2886         mapping = pci_map_single(tp->pdev, skb->data,
2887                                  skb_size - tp->rx_offset,
2888                                  PCI_DMA_FROMDEVICE);
2889
2890         map->skb = skb;
2891         pci_unmap_addr_set(map, mapping, mapping);
2892
2893         if (src_map != NULL)
2894                 src_map->skb = NULL;
2895
2896         desc->addr_hi = ((u64)mapping >> 32);
2897         desc->addr_lo = ((u64)mapping & 0xffffffff);
2898
2899         return skb_size;
2900 }
2901
2902 /* We only need to move over in the address because the other
2903  * members of the RX descriptor are invariant.  See notes above
2904  * tg3_alloc_rx_skb for full details.
2905  */
2906 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2907                            int src_idx, u32 dest_idx_unmasked)
2908 {
2909         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2910         struct ring_info *src_map, *dest_map;
2911         int dest_idx;
2912
2913         switch (opaque_key) {
2914         case RXD_OPAQUE_RING_STD:
2915                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2916                 dest_desc = &tp->rx_std[dest_idx];
2917                 dest_map = &tp->rx_std_buffers[dest_idx];
2918                 src_desc = &tp->rx_std[src_idx];
2919                 src_map = &tp->rx_std_buffers[src_idx];
2920                 break;
2921
2922         case RXD_OPAQUE_RING_JUMBO:
2923                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2924                 dest_desc = &tp->rx_jumbo[dest_idx];
2925                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2926                 src_desc = &tp->rx_jumbo[src_idx];
2927                 src_map = &tp->rx_jumbo_buffers[src_idx];
2928                 break;
2929
2930         default:
2931                 return;
2932         }
2933
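     /* Copy the skb pointer, DMA mapping, and bus address from the source
      * slot to the destination slot; the source slot is then emptied so it
      * can be refilled.
      */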
2934         dest_map->skb = src_map->skb;
2935         pci_unmap_addr_set(dest_map, mapping,
2936                            pci_unmap_addr(src_map, mapping));
2937         dest_desc->addr_hi = src_desc->addr_hi;
2938         dest_desc->addr_lo = src_desc->addr_lo;
2939
2940         src_map->skb = NULL;
2941 }
2942
2943 #if TG3_VLAN_TAG_USED
2944 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2945 {
2946         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2947 }
2948 #endif
2949
2950 /* The RX ring scheme is composed of multiple rings which post fresh
2951  * buffers to the chip, and one special ring the chip uses to report
2952  * status back to the host.
2953  *
2954  * The special ring reports the status of received packets to the
2955  * host.  The chip does not write into the original descriptor the
2956  * RX buffer was obtained from.  The chip simply takes the original
2957  * descriptor as provided by the host, updates the status and length
2958  * field, then writes this into the next status ring entry.
2959  *
2960  * Each ring the host uses to post buffers to the chip is described
2961  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2962  * it is first placed into on-chip RAM.  Once the packet's length is
2963  * known, the chip walks down the TG3_BDINFO entries to select a ring:
2964  * each TG3_BDINFO specifies a MAXLEN field, and the first entry whose
2965  * MAXLEN covers the new packet's length is chosen.
2966  *
2967  * The "separate ring for rx status" scheme may sound queer, but it makes
2968  * sense from a cache coherency perspective.  If only the host writes
2969  * to the buffer post rings, and only the chip writes to the rx status
2970  * rings, then cache lines never move beyond shared-modified state.
2971  * If both the host and chip were to write into the same ring, cache line
2972  * eviction could occur since both entities want it in an exclusive state.
2973  */
2974 static int tg3_rx(struct tg3 *tp, int budget)
2975 {
2976         u32 work_mask;
2977         u32 sw_idx = tp->rx_rcb_ptr;
2978         u16 hw_idx;
2979         int received;
2980
2981         hw_idx = tp->hw_status->idx[0].rx_producer;
2982         /*
2983          * We need to order the read of hw_idx and the read of
2984          * the opaque cookie.
2985          */
2986         rmb();
2987         work_mask = 0;
2988         received = 0;
2989         while (sw_idx != hw_idx && budget > 0) {
2990                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2991                 unsigned int len;
2992                 struct sk_buff *skb;
2993                 dma_addr_t dma_addr;
2994                 u32 opaque_key, desc_idx, *post_ptr;
2995
2996                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2997                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2998                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2999                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3000                                                   mapping);
3001                         skb = tp->rx_std_buffers[desc_idx].skb;
3002                         post_ptr = &tp->rx_std_ptr;
3003                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3004                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3005                                                   mapping);
3006                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3007                         post_ptr = &tp->rx_jumbo_ptr;
3008                 } else {
3010                         goto next_pkt_nopost;
3011                 }
3012
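                     /* Remember which producer ring(s) we consumed from so
                      * they can be replenished after the loop.
                      */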
3013                 work_mask |= opaque_key;
3014
3015                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3016                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3017                 drop_it:
3018                         tg3_recycle_rx(tp, opaque_key,
3019                                        desc_idx, *post_ptr);
3020                 drop_it_no_recycle:
3021                         /* Other statistics are tracked by the card itself. */
3022                         tp->net_stats.rx_dropped++;
3023                         goto next_pkt;
3024                 }
3025
3026                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3027
3028                 /* rx_offset != 2 iff this is a 5701 card running
3029                  * in PCI-X mode [see tg3_get_invariants()]
3030                  */
3031                 if (len > RX_COPY_THRESHOLD &&
3032                     tp->rx_offset == 2) {
3033                         int skb_size;
3034
3035                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3036                                                     desc_idx, *post_ptr);
3037                         if (skb_size < 0)
3038                                 goto drop_it;
3039
3040                         pci_unmap_single(tp->pdev, dma_addr,
3041                                          skb_size - tp->rx_offset,
3042                                          PCI_DMA_FROMDEVICE);
3043
3044                         skb_put(skb, len);
3045                 } else {
3046                         struct sk_buff *copy_skb;
3047
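                             /* Small packet (or a 5701 in PCI-X mode): copy
                              * the data into a fresh skb and recycle the
                              * original buffer back to the producer ring.
                              */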
3048                         tg3_recycle_rx(tp, opaque_key,
3049                                        desc_idx, *post_ptr);
3050
3051                         copy_skb = dev_alloc_skb(len + 2);
3052                         if (copy_skb == NULL)
3053                                 goto drop_it_no_recycle;
3054
3055                         copy_skb->dev = tp->dev;
3056                         skb_reserve(copy_skb, 2);
3057                         skb_put(copy_skb, len);
3058                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3059                         memcpy(copy_skb->data, skb->data, len);
3060                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3061
3062                         /* We'll reuse the original ring buffer. */
3063                         skb = copy_skb;
3064                 }
3065
3066                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3067                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3068                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3069                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3070                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3071                 else
3072                         skb->ip_summed = CHECKSUM_NONE;
3073
3074                 skb->protocol = eth_type_trans(skb, tp->dev);
3075 #if TG3_VLAN_TAG_USED
3076                 if (tp->vlgrp != NULL &&
3077                     desc->type_flags & RXD_FLAG_VLAN) {
3078                         tg3_vlan_rx(tp, skb,
3079                                     desc->err_vlan & RXD_VLAN_MASK);
3080                 } else
3081 #endif
3082                         netif_receive_skb(skb);
3083
3084                 tp->dev->last_rx = jiffies;
3085                 received++;
3086                 budget--;
3087
3088 next_pkt:
3089                 (*post_ptr)++;
3090 next_pkt_nopost:
3091                 sw_idx++;
3092                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3093
3094                 /* Refresh hw_idx to see if there is new work */
3095                 if (sw_idx == hw_idx) {
3096                         hw_idx = tp->hw_status->idx[0].rx_producer;
3097                         rmb();
3098                 }
3099         }
3100
3101         /* ACK the status ring. */
3102         tp->rx_rcb_ptr = sw_idx;
3103         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3104
3105         /* Refill RX ring(s). */
3106         if (work_mask & RXD_OPAQUE_RING_STD) {
3107                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3108                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3109                              sw_idx);
3110         }
3111         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3112                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3113                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3114                              sw_idx);
3115         }
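     /* mmiowb() orders the mailbox writes above on platforms with weakly
      * ordered memory-mapped I/O.
      */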
3116         mmiowb();
3117
3118         return received;
3119 }
3120
3121 static int tg3_poll(struct net_device *netdev, int *budget)
3122 {
3123         struct tg3 *tp = netdev_priv(netdev);
3124         struct tg3_hw_status *sblk = tp->hw_status;
3125         int done;
3126
3127         /* handle link change and other phy events */
3128         if (!(tp->tg3_flags &
3129               (TG3_FLAG_USE_LINKCHG_REG |
3130                TG3_FLAG_POLL_SERDES))) {
3131                 if (sblk->status & SD_STATUS_LINK_CHG) {
3132                         sblk->status = SD_STATUS_UPDATED |
3133                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3134                         spin_lock(&tp->lock);
3135                         tg3_setup_phy(tp, 0);
3136                         spin_unlock(&tp->lock);
3137                 }
3138         }
3139
3140         /* run TX completion thread */
3141         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3142                 spin_lock(&tp->tx_lock);
3143                 tg3_tx(tp);
3144                 spin_unlock(&tp->tx_lock);
3145         }
3146
3147         /* run RX thread, within the bounds set by NAPI.
3148          * All RX "locking" is done by ensuring outside
3149          * code synchronizes with dev->poll()
3150          */
3151         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3152                 int orig_budget = *budget;
3153                 int work_done;
3154
3155                 if (orig_budget > netdev->quota)
3156                         orig_budget = netdev->quota;
3157
3158                 work_done = tg3_rx(tp, orig_budget);
3159
3160                 *budget -= work_done;
3161                 netdev->quota -= work_done;
3162         }
3163
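     /* In tagged-status mode, remember the latest status tag; it is
      * written back to the interrupt mailbox when interrupts are
      * re-enabled.
      */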
3164         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
3165                 tp->last_tag = sblk->status_tag;
3166         rmb();
3167         sblk->status &= ~SD_STATUS_UPDATED;
3168
3169         /* if no more work, tell net stack and NIC we're done */
3170         done = !tg3_has_work(tp);
3171         if (done) {
3172                 spin_lock(&tp->lock);
3173                 netif_rx_complete(netdev);
3174                 tg3_restart_ints(tp);
3175                 spin_unlock(&tp->lock);
3176         }
3177
3178         return (done ? 0 : 1);
3179 }
3180
3181 static void tg3_irq_quiesce(struct tg3 *tp)
3182 {
3183         BUG_ON(tp->irq_sync);
3184
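     /* Mark the interrupt path as quiescing and make the flag visible
      * before waiting for any handler that is already running.
      */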
3185         tp->irq_sync = 1;
3186         smp_mb();
3187
3188         synchronize_irq(tp->pdev->irq);
3189 }
3190
3191 static inline int tg3_irq_sync(struct tg3 *tp)
3192 {
3193         return tp->irq_sync;
3194 }
3195
3196 /* Fully shut down all other tg3 driver activity in the system.
3197  * If irq_sync is non-zero, the IRQ handler is synchronized with as
3198  * well.  This is normally only needed when shutting down the
3199  * device.
3200  */
3201 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3202 {
3203         if (irq_sync)
3204                 tg3_irq_quiesce(tp);
3205         spin_lock_bh(&tp->lock);
3206         spin_lock(&tp->tx_lock);
3207 }
3208
3209 static inline void tg3_full_unlock(struct tg3 *tp)
3210 {
3211         spin_unlock(&tp->tx_lock);
3212         spin_unlock_bh(&tp->lock);
3213 }
3214
3215 /* MSI ISR - No need to check for interrupt sharing and no need to
3216  * flush status block and interrupt mailbox. PCI ordering rules
3217  * guarantee that MSI will arrive after the status block.
3218  */
3219 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3220 {
3221         struct net_device *dev = dev_id;
3222