/* drivers/net/tg3.c — [TG3]: Increase TEST_BUFFER_SIZE to 8K. */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.28"
65 #define DRV_MODULE_RELDATE      "May 23, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
232           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
233         { 0, }
234 };
235
236 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
237
238 static struct {
239         const char string[ETH_GSTRING_LEN];
240 } ethtool_stats_keys[TG3_NUM_STATS] = {
241         { "rx_octets" },
242         { "rx_fragments" },
243         { "rx_ucast_packets" },
244         { "rx_mcast_packets" },
245         { "rx_bcast_packets" },
246         { "rx_fcs_errors" },
247         { "rx_align_errors" },
248         { "rx_xon_pause_rcvd" },
249         { "rx_xoff_pause_rcvd" },
250         { "rx_mac_ctrl_rcvd" },
251         { "rx_xoff_entered" },
252         { "rx_frame_too_long_errors" },
253         { "rx_jabbers" },
254         { "rx_undersize_packets" },
255         { "rx_in_length_errors" },
256         { "rx_out_length_errors" },
257         { "rx_64_or_less_octet_packets" },
258         { "rx_65_to_127_octet_packets" },
259         { "rx_128_to_255_octet_packets" },
260         { "rx_256_to_511_octet_packets" },
261         { "rx_512_to_1023_octet_packets" },
262         { "rx_1024_to_1522_octet_packets" },
263         { "rx_1523_to_2047_octet_packets" },
264         { "rx_2048_to_4095_octet_packets" },
265         { "rx_4096_to_8191_octet_packets" },
266         { "rx_8192_to_9022_octet_packets" },
267
268         { "tx_octets" },
269         { "tx_collisions" },
270
271         { "tx_xon_sent" },
272         { "tx_xoff_sent" },
273         { "tx_flow_control" },
274         { "tx_mac_errors" },
275         { "tx_single_collisions" },
276         { "tx_mult_collisions" },
277         { "tx_deferred" },
278         { "tx_excessive_collisions" },
279         { "tx_late_collisions" },
280         { "tx_collide_2times" },
281         { "tx_collide_3times" },
282         { "tx_collide_4times" },
283         { "tx_collide_5times" },
284         { "tx_collide_6times" },
285         { "tx_collide_7times" },
286         { "tx_collide_8times" },
287         { "tx_collide_9times" },
288         { "tx_collide_10times" },
289         { "tx_collide_11times" },
290         { "tx_collide_12times" },
291         { "tx_collide_13times" },
292         { "tx_collide_14times" },
293         { "tx_collide_15times" },
294         { "tx_ucast_packets" },
295         { "tx_mcast_packets" },
296         { "tx_bcast_packets" },
297         { "tx_carrier_sense_errors" },
298         { "tx_discards" },
299         { "tx_errors" },
300
301         { "dma_writeq_full" },
302         { "dma_write_prioq_full" },
303         { "rxbds_empty" },
304         { "rx_discards" },
305         { "rx_errors" },
306         { "rx_threshold_hit" },
307
308         { "dma_readq_full" },
309         { "dma_read_prioq_full" },
310         { "tx_comp_queue_full" },
311
312         { "ring_set_send_prod_index" },
313         { "ring_status_update" },
314         { "nic_irqs" },
315         { "nic_avoided_irqs" },
316         { "nic_tx_threshold_hit" }
317 };
318
319 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320 {
321         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
322                 unsigned long flags;
323
324                 spin_lock_irqsave(&tp->indirect_lock, flags);
325                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
326                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
327                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
328         } else {
329                 writel(val, tp->regs + off);
330                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
331                         readl(tp->regs + off);
332         }
333 }
334
335 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
336 {
337         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
338                 unsigned long flags;
339
340                 spin_lock_irqsave(&tp->indirect_lock, flags);
341                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
342                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
343                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
344         } else {
345                 void __iomem *dest = tp->regs + off;
346                 writel(val, dest);
347                 readl(dest);    /* always flush PCI write */
348         }
349 }
350
351 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
352 {
353         void __iomem *mbox = tp->regs + off;
354         writel(val, mbox);
355         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
356                 readl(mbox);
357 }
358
359 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
360 {
361         void __iomem *mbox = tp->regs + off;
362         writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
364                 writel(val, mbox);
365         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
366                 readl(mbox);
367 }
368
/* Register accessor shorthands.  All expect a local `tp` in scope.
 * tw32/tw32_f go through the indirect-write helpers above (tw32_f also
 * flushes); the mailbox variants apply the per-chip mailbox workarounds;
 * tw16/tw8 and tr32/tr16/tr8 are plain MMIO accesses of the given width.
 */
#define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
#define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg)               readl(tp->regs + (reg))
#define tr16(reg)               readw(tp->regs + (reg))
#define tr8(reg)                readb(tp->regs + (reg))
380
/* Write VAL into NIC on-board memory at offset OFF through the PCI
 * memory window (base-address + data config registers), serialized by
 * indirect_lock since the window is shared global state.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
393
/* Read a 32-bit word of NIC on-board memory at offset OFF into *VAL,
 * using the same locked PCI memory window as tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
406
/* Mask chip interrupts: set the PCI-int mask bit in MISC_HOST_CTRL and
 * write 1 to the interrupt mailbox; the trailing tr32 flushes the
 * posted mailbox write.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
414
/* If the status block says an update is pending, prod the chip to raise
 * an interrupt via GRC_LCLCTRL_SETINT so the event is not lost.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
420
/* Unmask chip interrupts: clear the PCI-int mask bit, acknowledge up to
 * the last processed status tag via the interrupt mailbox (tag in bits
 * 31:24), flush the posted write, then force an interrupt if status was
 * already updated so no pending work is missed.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     (tp->last_tag << 24));
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_cond_int(tp);
}
431
432 static inline unsigned int tg3_has_work(struct tg3 *tp)
433 {
434         struct tg3_hw_status *sblk = tp->hw_status;
435         unsigned int work_exists = 0;
436
437         /* check for phy events */
438         if (!(tp->tg3_flags &
439               (TG3_FLAG_USE_LINKCHG_REG |
440                TG3_FLAG_POLL_SERDES))) {
441                 if (sblk->status & SD_STATUS_LINK_CHG)
442                         work_exists = 1;
443         }
444         /* check for RX/TX work to do */
445         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
446             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
447                 work_exists = 1;
448
449         return work_exists;
450 }
451
452 /* tg3_restart_ints
453  *  similar to tg3_enable_ints, but it accurately determines whether there
454  *  is new work pending and can return without flushing the PIO write
455  *  which reenables interrupts 
456  */
/* Re-enable interrupts after NAPI poll without flushing the PIO write
 * (mmiowb() only orders it).  If not using tagged status, re-check for
 * work that may have arrived while interrupts were off and force a
 * coalescing-now event so it gets serviced.
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
		(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
474
/* Quiesce the network stack's view of this device: stop NAPI polling
 * first, then disable the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
480
/* Resume TX and NAPI polling, then force an interrupt if status was
 * already updated so pending events are processed immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tg3_cond_int(tp);
}
491
/* Switch the chip core clock to its normal source.  Transitional writes
 * (625-core bit on 5705+, or 44MHz+ALTCLK then ALTCLK on older chips)
 * are flushed with tw32_f and followed by 40us settle delays before the
 * final clock_ctrl value is written.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only CLKRUN control bits and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl |
		     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
521
522 #define PHY_BUSY_LOOPS  5000
523
/* Read PHY register REG over the MII management interface into *val.
 * Temporarily suspends MAC auto-polling if enabled, issues the read
 * command through MAC_MI_COM, and polls (up to PHY_BUSY_LOOPS times,
 * 10us apart) for the BUSY bit to clear.  Returns 0 on success or
 * -EBUSY on timeout; *val is zeroed on entry so it is 0 on failure.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to get the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
572
/* Write VAL to PHY register REG over the MII management interface.
 * Mirrors tg3_readphy(): suspends MAC auto-polling if enabled, issues
 * the write through MAC_MI_COM, and polls for BUSY to clear.  Returns
 * 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
617
618 static void tg3_phy_set_wirespeed(struct tg3 *tp)
619 {
620         u32 val;
621
622         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
623                 return;
624
625         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
626             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
627                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
628                              (val | (1 << 15) | (1 << 4)));
629 }
630
631 static int tg3_bmcr_reset(struct tg3 *tp)
632 {
633         u32 phy_control;
634         int limit, err;
635
636         /* OK, reset it, and poll the BMCR_RESET bit until it
637          * clears or we time out.
638          */
639         phy_control = BMCR_RESET;
640         err = tg3_writephy(tp, MII_BMCR, phy_control);
641         if (err != 0)
642                 return -EBUSY;
643
644         limit = 5000;
645         while (limit--) {
646                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
647                 if (err != 0)
648                         return -EBUSY;
649
650                 if ((phy_control & BMCR_RESET) == 0) {
651                         udelay(40);
652                         break;
653                 }
654                 udelay(10);
655         }
656         if (limit <= 0)
657                 return -EBUSY;
658
659         return 0;
660 }
661
662 static int tg3_wait_macro_done(struct tg3 *tp)
663 {
664         int limit = 100;
665
666         while (limit--) {
667                 u32 tmp32;
668
669                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
670                         if ((tmp32 & 0x1000) == 0)
671                                 break;
672                 }
673         }
674         if (limit <= 0)
675                 return -EBUSY;
676
677         return 0;
678 }
679
/* Write a fixed test pattern into each of the 4 PHY DSP channels and
 * read it back to verify the DSP is healthy.  On any failure, sets
 * *resetp (or issues DSP writes 0x4001/0x4005 on a data mismatch) and
 * returns -EBUSY; returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: 3 (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the writes, then wait for the DSP macro. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back (low, high) pairs and compare with the
		 * pattern, masking to the bits the DSP preserves.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
745
746 static int tg3_phy_reset_chanpat(struct tg3 *tp)
747 {
748         int chan;
749
750         for (chan = 0; chan < 4; chan++) {
751                 int i;
752
753                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
754                              (chan * 0x2000) | 0x0200);
755                 tg3_writephy(tp, 0x16, 0x0002);
756                 for (i = 0; i < 6; i++)
757                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
758                 tg3_writephy(tp, 0x16, 0x0202);
759                 if (tg3_wait_macro_done(tp))
760                         return -EBUSY;
761         }
762
763         return 0;
764 }
765
/* PHY reset workaround for 5703/5704/5705: repeatedly (up to 10 tries)
 * reset the PHY, force 1000/full master mode, and run the DSP test
 * pattern until it verifies; then clear the pattern and restore the
 * saved MII_TG3_CTRL and extended-control state.
 *
 * NOTE(review): if every retry fails its tg3_readphy() calls and falls
 * through via `continue`, reg32/phy9_orig may be used uninitialized in
 * the epilogue below — presumably unreachable in practice, but verify.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* Pattern verified => done; otherwise maybe retry with
		 * a fresh PHY reset (do_phy_reset set by the callee).
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved before forcing master. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the bits set above);
	 * report -EBUSY if we cannot read the register back.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
841
/* Reset the tigon3 PHY and re-apply all chip-specific PHY workarounds
 * afterwards.  Returns 0 on success, -EBUSY if the PHY cannot be read,
 * or the error code from the chip-specific reset helper.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        /* BMSR is read twice; the first read returns (and clears) the
         * latched status, the second reflects current link state.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* 5703/5704/5705 need the extended reset sequence (with DSP
         * test-pattern verification); everything else gets a plain
         * BMCR-based reset.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        /* PHY ADC bug workaround: DSP register writes bracketed by
         * AUX_CTRL 0x0c00 (SM_DSP clock on) and 0x0400.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* 5704 A0 workaround: register 0x1c is written twice on purpose. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        /* PHY BER bug workaround: another DSP write sequence, again
         * bracketed by AUX_CTRL 0x0c00 / 0x0400.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        tg3_phy_set_wirespeed(tp);
        return 0;
}
919
/* Drive the GRC_LOCAL_CTRL GPIO pins that control auxiliary (Vaux)
 * power, based on whether Wake-on-LAN is enabled on this port or --
 * on dual-port 5704 chips -- on the peer port.  The GPIO output
 * sequences and the udelay(100) between steps are kept in their exact
 * order; do not reorder them.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* Skip entirely when EEPROM write-protect is set (the same flag
         * marks LOM designs elsewhere in this driver).
         */
        if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
                return;

        /* 5704 is dual-port: consult the peer function's WOL state too. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                tp_peer = pci_get_drvdata(tp->pdev_peer);
                if (!tp_peer)
                        BUG();
        }


        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
                /* WOL is enabled on at least one port: keep aux power up. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE0 |
                              GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OE2 |
                              GRC_LCLCTRL_GPIO_OUTPUT0 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl;

                        /* If the peer port already completed init, it owns
                         * the GPIO sequencing; leave the pins alone.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three-step GPIO sequence: assert outputs, raise
                         * GPIO0, then (if usable) drop GPIO2.
                         */
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                grc_local_ctrl);
                        udelay(100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                       grc_local_ctrl);
                                udelay(100);
                        }
                }
        } else {
                /* No WOL anywhere: pulse GPIO1 (not needed on 5700/5701). */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1));
                        udelay(100);

                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                             (GRC_LCLCTRL_GPIO_OE1 |
                              GRC_LCLCTRL_GPIO_OUTPUT1));
                        udelay(100);
                }
        }
}
1006
1007 static int tg3_setup_phy(struct tg3 *, int);
1008
1009 #define RESET_KIND_SHUTDOWN     0
1010 #define RESET_KIND_INIT         1
1011 #define RESET_KIND_SUSPEND      2
1012
1013 static void tg3_write_sig_post_reset(struct tg3 *, int);
1014 static int tg3_halt_cpu(struct tg3 *, u32);
1015
/* Transition the device to PCI power state D0-D3 ('state' 0-3 maps
 * directly onto the PCI_PM_CTRL state field).  For D0 this only
 * restores full power and returns.  For D1-D3 it arms PME, drops the
 * PHY to 10/half (saving the original link config), programs the MAC
 * for Wake-on-LAN if enabled, gears down the core clocks, hands GPIO
 * aux-power control to tg3_frob_aux_power(), and finally writes the
 * new power state.  Returns 0 on success or -EINVAL for an unknown
 * state.  The write/delay ordering is hardware-mandated.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        /* Clear PME status (write-one-to-clear) and the old state bits. */
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                power_control |= 0;     /* D0: state bits stay zero */
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                        udelay(100);
                }

                /* Full-power path ends here; the rest is D1-D3 only. */
                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        power_control |= PCI_PM_CTRL_PME_ENABLE;

        /* Mask PCI interrupts while the chip is in a low-power state. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the active link config so resume can restore it. */
        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        /* Copper PHYs are dropped to 10/half autoneg for low power. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                /* Keep the MAC/PHY alive enough to receive the magic
                 * packet: MII (copper) or TBI (serdes) port mode.
                 */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                /* Enable magic-packet detection if PME from D3cold is
                 * supported and WOL is requested.
                 */
                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gear down the core clocks; the exact bit sets depend on the
         * ASIC generation.
         */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Two-step clock switch with a settle delay after each. */
                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                        tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
1197
1198 static void tg3_link_report(struct tg3 *tp)
1199 {
1200         if (!netif_carrier_ok(tp->dev)) {
1201                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1202         } else {
1203                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1204                        tp->dev->name,
1205                        (tp->link_config.active_speed == SPEED_1000 ?
1206                         1000 :
1207                         (tp->link_config.active_speed == SPEED_100 ?
1208                          100 : 10)),
1209                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1210                         "full" : "half"));
1211
1212                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1213                        "%s for RX.\n",
1214                        tp->dev->name,
1215                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1216                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1217         }
1218 }
1219
1220 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1221 {
1222         u32 new_tg3_flags = 0;
1223         u32 old_rx_mode = tp->rx_mode;
1224         u32 old_tx_mode = tp->tx_mode;
1225
1226         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1227                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1228                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1229                                 if (remote_adv & LPA_PAUSE_CAP)
1230                                         new_tg3_flags |=
1231                                                 (TG3_FLAG_RX_PAUSE |
1232                                                 TG3_FLAG_TX_PAUSE);
1233                                 else if (remote_adv & LPA_PAUSE_ASYM)
1234                                         new_tg3_flags |=
1235                                                 (TG3_FLAG_RX_PAUSE);
1236                         } else {
1237                                 if (remote_adv & LPA_PAUSE_CAP)
1238                                         new_tg3_flags |=
1239                                                 (TG3_FLAG_RX_PAUSE |
1240                                                 TG3_FLAG_TX_PAUSE);
1241                         }
1242                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1243                         if ((remote_adv & LPA_PAUSE_CAP) &&
1244                         (remote_adv & LPA_PAUSE_ASYM))
1245                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1246                 }
1247
1248                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1249                 tp->tg3_flags |= new_tg3_flags;
1250         } else {
1251                 new_tg3_flags = tp->tg3_flags;
1252         }
1253
1254         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1255                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1256         else
1257                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1258
1259         if (old_rx_mode != tp->rx_mode) {
1260                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1261         }
1262         
1263         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1264                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1265         else
1266                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1267
1268         if (old_tx_mode != tp->tx_mode) {
1269                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1270         }
1271 }
1272
1273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1274 {
1275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1276         case MII_TG3_AUX_STAT_10HALF:
1277                 *speed = SPEED_10;
1278                 *duplex = DUPLEX_HALF;
1279                 break;
1280
1281         case MII_TG3_AUX_STAT_10FULL:
1282                 *speed = SPEED_10;
1283                 *duplex = DUPLEX_FULL;
1284                 break;
1285
1286         case MII_TG3_AUX_STAT_100HALF:
1287                 *speed = SPEED_100;
1288                 *duplex = DUPLEX_HALF;
1289                 break;
1290
1291         case MII_TG3_AUX_STAT_100FULL:
1292                 *speed = SPEED_100;
1293                 *duplex = DUPLEX_FULL;
1294                 break;
1295
1296         case MII_TG3_AUX_STAT_1000HALF:
1297                 *speed = SPEED_1000;
1298                 *duplex = DUPLEX_HALF;
1299                 break;
1300
1301         case MII_TG3_AUX_STAT_1000FULL:
1302                 *speed = SPEED_1000;
1303                 *duplex = DUPLEX_FULL;
1304                 break;
1305
1306         default:
1307                 *speed = SPEED_INVALID;
1308                 *duplex = DUPLEX_INVALID;
1309                 break;
1310         };
1311 }
1312
/* Program the copper PHY's advertisement and control registers from
 * tp->link_config.  Three cases: a restricted low-power advertisement,
 * a full autoneg advertisement (speed == SPEED_INVALID), or a single
 * forced speed/duplex.  Finishes by either forcing BMCR or restarting
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->link_config.phy_is_low_power) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                /* WOL at 100Mb keeps the 100baseT modes advertised. */
                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No specific speed requested: advertise everything,
                 * minus gigabit on 10/100-only parts.
                 */
                tp->link_config.advertising =
                        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg | ADVERTISED_MII);

                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                /* Translate ethtool ADVERTISED_* bits into MII
                 * ADVERTISE_* register bits.
                 */
                new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;
                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        /* 5701 A0/B0 gets master mode forced for gigabit. */
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        /* Same 5701 A0/B0 master-mode workaround as above. */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);

                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                }
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                };

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                /* Only touch BMCR if the requested mode differs from the
                 * current one.  Enter loopback first and poll (up to
                 * 1500 * 10us = 15ms) for link to drop, so the forced
                 * mode is applied on a dead link.
                 */
                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                /* Double read: BMSR link status is latched. */
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                /* Autoneg enabled: kick off (re)negotiation. */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
1451
1452 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1453 {
1454         int err;
1455
1456         /* Turn off tap power management. */
1457         /* Set Extended packet length bit */
1458         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1459
1460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1462
1463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1465
1466         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1467         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1468
1469         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1470         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1471
1472         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1473         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1474
1475         udelay(40);
1476
1477         return err;
1478 }
1479
1480 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1481 {
1482         u32 adv_reg, all_mask;
1483
1484         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1485                 return 0;
1486
1487         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1488                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1489         if ((adv_reg & all_mask) != all_mask)
1490                 return 0;
1491         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1492                 u32 tg3_ctrl;
1493
1494                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1495                         return 0;
1496
1497                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1498                             MII_TG3_CTRL_ADV_1000_FULL);
1499                 if ((tg3_ctrl & all_mask) != all_mask)
1500                         return 0;
1501         }
1502         return 1;
1503 }
1504
1505 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1506 {
1507         int current_link_up;
1508         u32 bmsr, dummy;
1509         u16 current_speed;
1510         u8 current_duplex;
1511         int i, err;
1512
1513         tw32(MAC_EVENT, 0);
1514
1515         tw32_f(MAC_STATUS,
1516              (MAC_STATUS_SYNC_CHANGED |
1517               MAC_STATUS_CFG_CHANGED |
1518               MAC_STATUS_MI_COMPLETION |
1519               MAC_STATUS_LNKSTATE_CHANGED));
1520         udelay(40);
1521
1522         tp->mi_mode = MAC_MI_MODE_BASE;
1523         tw32_f(MAC_MI_MODE, tp->mi_mode);
1524         udelay(80);
1525
1526         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1527
1528         /* Some third-party PHYs need to be reset on link going
1529          * down.
1530          */
1531         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1532              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1533              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1534             netif_carrier_ok(tp->dev)) {
1535                 tg3_readphy(tp, MII_BMSR, &bmsr);
1536                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1537                     !(bmsr & BMSR_LSTATUS))
1538                         force_reset = 1;
1539         }
1540         if (force_reset)
1541                 tg3_phy_reset(tp);
1542
1543         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1544                 tg3_readphy(tp, MII_BMSR, &bmsr);
1545                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1546                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1547                         bmsr = 0;
1548
1549                 if (!(bmsr & BMSR_LSTATUS)) {
1550                         err = tg3_init_5401phy_dsp(tp);
1551                         if (err)
1552                                 return err;
1553
1554                         tg3_readphy(tp, MII_BMSR, &bmsr);
1555                         for (i = 0; i < 1000; i++) {
1556                                 udelay(10);
1557                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1558                                     (bmsr & BMSR_LSTATUS)) {
1559                                         udelay(40);
1560                                         break;
1561                                 }
1562                         }
1563
1564                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1565                             !(bmsr & BMSR_LSTATUS) &&
1566                             tp->link_config.active_speed == SPEED_1000) {
1567                                 err = tg3_phy_reset(tp);
1568                                 if (!err)
1569                                         err = tg3_init_5401phy_dsp(tp);
1570                                 if (err)
1571                                         return err;
1572                         }
1573                 }
1574         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1575                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1576                 /* 5701 {A0,B0} CRC bug workaround */
1577                 tg3_writephy(tp, 0x15, 0x0a75);
1578                 tg3_writephy(tp, 0x1c, 0x8c68);
1579                 tg3_writephy(tp, 0x1c, 0x8d68);
1580                 tg3_writephy(tp, 0x1c, 0x8c68);
1581         }
1582
1583         /* Clear pending interrupts... */
1584         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1585         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1586
1587         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1588                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1589         else
1590                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1591
1592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1594                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1595                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1596                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1597                 else
1598                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1599         }
1600
1601         current_link_up = 0;
1602         current_speed = SPEED_INVALID;
1603         current_duplex = DUPLEX_INVALID;
1604
1605         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1606                 u32 val;
1607
1608                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1609                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1610                 if (!(val & (1 << 10))) {
1611                         val |= (1 << 10);
1612                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1613                         goto relink;
1614                 }
1615         }
1616
1617         bmsr = 0;
1618         for (i = 0; i < 100; i++) {
1619                 tg3_readphy(tp, MII_BMSR, &bmsr);
1620                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1621                     (bmsr & BMSR_LSTATUS))
1622                         break;
1623                 udelay(40);
1624         }
1625
1626         if (bmsr & BMSR_LSTATUS) {
1627                 u32 aux_stat, bmcr;
1628
1629                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1630                 for (i = 0; i < 2000; i++) {
1631                         udelay(10);
1632                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1633                             aux_stat)
1634                                 break;
1635                 }
1636
1637                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1638                                              &current_speed,
1639                                              &current_duplex);
1640
1641                 bmcr = 0;
1642                 for (i = 0; i < 200; i++) {
1643                         tg3_readphy(tp, MII_BMCR, &bmcr);
1644                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1645                                 continue;
1646                         if (bmcr && bmcr != 0x7fff)
1647                                 break;
1648                         udelay(10);
1649                 }
1650
1651                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1652                         if (bmcr & BMCR_ANENABLE) {
1653                                 current_link_up = 1;
1654
1655                                 /* Force autoneg restart if we are exiting
1656                                  * low power mode.
1657                                  */
1658                                 if (!tg3_copper_is_advertising_all(tp))
1659                                         current_link_up = 0;
1660                         } else {
1661                                 current_link_up = 0;
1662                         }
1663                 } else {
1664                         if (!(bmcr & BMCR_ANENABLE) &&
1665                             tp->link_config.speed == current_speed &&
1666                             tp->link_config.duplex == current_duplex) {
1667                                 current_link_up = 1;
1668                         } else {
1669                                 current_link_up = 0;
1670                         }
1671                 }
1672
1673                 tp->link_config.active_speed = current_speed;
1674                 tp->link_config.active_duplex = current_duplex;
1675         }
1676
1677         if (current_link_up == 1 &&
1678             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1679             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1680                 u32 local_adv, remote_adv;
1681
1682                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1683                         local_adv = 0;
1684                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1685
1686                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1687                         remote_adv = 0;
1688
1689                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1690
1691                 /* If we are not advertising full pause capability,
1692                  * something is wrong.  Bring the link down and reconfigure.
1693                  */
1694                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1695                         current_link_up = 0;
1696                 } else {
1697                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1698                 }
1699         }
1700 relink:
1701         if (current_link_up == 0) {
1702                 u32 tmp;
1703
1704                 tg3_phy_copper_begin(tp);
1705
1706                 tg3_readphy(tp, MII_BMSR, &tmp);
1707                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1708                     (tmp & BMSR_LSTATUS))
1709                         current_link_up = 1;
1710         }
1711
1712         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1713         if (current_link_up == 1) {
1714                 if (tp->link_config.active_speed == SPEED_100 ||
1715                     tp->link_config.active_speed == SPEED_10)
1716                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1717                 else
1718                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1719         } else
1720                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1721
1722         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1723         if (tp->link_config.active_duplex == DUPLEX_HALF)
1724                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1725
1726         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1728                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1729                     (current_link_up == 1 &&
1730                      tp->link_config.active_speed == SPEED_10))
1731                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1732         } else {
1733                 if (current_link_up == 1)
1734                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1735         }
1736
1737         /* ??? Without this setting Netgear GA302T PHY does not
1738          * ??? send/receive packets...
1739          */
1740         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1741             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1742                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1743                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1744                 udelay(80);
1745         }
1746
1747         tw32_f(MAC_MODE, tp->mac_mode);
1748         udelay(40);
1749
1750         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1751                 /* Polled via timer. */
1752                 tw32_f(MAC_EVENT, 0);
1753         } else {
1754                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1755         }
1756         udelay(40);
1757
1758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1759             current_link_up == 1 &&
1760             tp->link_config.active_speed == SPEED_1000 &&
1761             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1762              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1763                 udelay(120);
1764                 tw32_f(MAC_STATUS,
1765                      (MAC_STATUS_SYNC_CHANGED |
1766                       MAC_STATUS_CFG_CHANGED));
1767                 udelay(40);
1768                 tg3_write_mem(tp,
1769                               NIC_SRAM_FIRMWARE_MBOX,
1770                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1771         }
1772
1773         if (current_link_up != netif_carrier_ok(tp->dev)) {
1774                 if (current_link_up)
1775                         netif_carrier_on(tp->dev);
1776                 else
1777                         netif_carrier_off(tp->dev);
1778                 tg3_link_report(tp);
1779         }
1780
1781         return 0;
1782 }
1783
/* Software auto-negotiation state for 1000BASE-X fiber links (IEEE
 * 802.3 clause 37 style, run "by hand" when the hardware autoneg
 * engine is not used).  An instance lives on the stack of
 * fiber_autoneg() and is advanced one tick at a time by
 * tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        /* Current state-machine state (one of ANEG_STATE_*). */
        int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        /* Control and result flags (MR_*).  MR_AN_ENABLE/MR_RESTART_AN
         * are inputs; MR_AN_COMPLETE, MR_LINK_OK and the MR_LP_ADV_*
         * bits (link partner advertisement decoded from the received
         * config word) are outputs read back by the caller.
         */
        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters: cur_time advances once per state-machine call;
         * link_time records the tick at which the current phase began,
         * so (cur_time - link_time) measures the phase's settle time.
         */
        unsigned long link_time, cur_time;

        /* Last config word received from the partner, and how many
         * consecutive ticks the same word has been seen (more than one
         * consecutive match sets ability_match below).
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Match flags derived from the incoming config-word stream. */
        char ability_match, idle_match, ack_match;

        /* Raw clause-37 config words: txconfig is what we transmit via
         * MAC_TX_AUTO_NEG, rxconfig what we last sampled from
         * MAC_RX_AUTO_NEG (bit meanings given by ANEG_CFG_*).
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Number of ticks to let a phase settle before advancing a state. */
#define ANEG_STATE_SETTLE_TIME  10000
1847
/* Advance the software 1000BASE-X auto-negotiation state machine by
 * one tick.  Called in a tight loop (roughly once per microsecond)
 * from fiber_autoneg() until the return value is ANEG_DONE or
 * ANEG_FAILED; ANEG_TIMER_ENAB/ANEG_OK mean "keep ticking".
 *
 * @tp: device state, used for MAC register access (tr32/tw32).
 * @ap: per-negotiation state carried between ticks; reset when
 *      ap->state is ANEG_STATE_UNKNOWN.
 *
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First tick of a fresh negotiation: clear all tracking state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample any config word the link partner is currently sending
         * and update the ability/ack/idle match trackers accordingly.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        /* Word changed: restart the consecutive-match count. */
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        /* Same word seen twice in a row => ability match. */
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config words on the wire: the link is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch(ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Start a full negotiation from scratch. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word to restart the partner. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold the restart condition for the settle time. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                } else {
                        ret = ANEG_TIMER_ENAB;
                }
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Start advertising our abilities: full duplex + pause. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until the partner sends a stable non-zero word. */
                if (ap->ability_match != 0 && ap->rxconfig != 0) {
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                }
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's ability word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked; its word (ACK aside) must still
                         * match what we latched during ability detect.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner went back to idle: restart negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reject words with reserved bits set. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner advertisement into MR_LP_ADV_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        /* Partner dropped to idle mid-exchange: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next page advertised but neither side
                                 * actually exchanges one: proceed only if
                                 * we sent none and received none.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        };

        return ret;
}
2095
2096 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2097 {
2098         int res = 0;
2099         struct tg3_fiber_aneginfo aninfo;
2100         int status = ANEG_FAILED;
2101         unsigned int tick;
2102         u32 tmp;
2103
2104         tw32_f(MAC_TX_AUTO_NEG, 0);
2105
2106         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2107         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2108         udelay(40);
2109
2110         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2111         udelay(40);
2112
2113         memset(&aninfo, 0, sizeof(aninfo));
2114         aninfo.flags |= MR_AN_ENABLE;
2115         aninfo.state = ANEG_STATE_UNKNOWN;
2116         aninfo.cur_time = 0;
2117         tick = 0;
2118         while (++tick < 195000) {
2119                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2120                 if (status == ANEG_DONE || status == ANEG_FAILED)
2121                         break;
2122
2123                 udelay(1);
2124         }
2125
2126         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2127         tw32_f(MAC_MODE, tp->mac_mode);
2128         udelay(40);
2129
2130         *flags = aninfo.flags;
2131
2132         if (status == ANEG_DONE &&
2133             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2134                              MR_LP_ADV_FULL_DUPLEX)))
2135                 res = 1;
2136
2137         return res;
2138 }
2139
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 *
 * The register numbers and values written below are opaque vendor
 * magic; the inline comments describe each step's stated intent.
 * NOTE(review): whether this sequence is safe to repeat while a link
 * is up is governed only by the guard at the top — confirm against
 * the BCM8002 documentation before changing it.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2189
/* Establish a fiber link using the chip's hardware SG_DIG autoneg
 * engine (used on 5704-class SerDes parts).
 *
 * @tp:         device state.
 * @mac_status: a recent snapshot of the MAC_STATUS register.
 *
 * Returns 1 when a link was established, 0 otherwise.  As a side
 * effect it may program MAC_SERDES_CFG / SG_DIG_CTRL and configure
 * flow control via tg3_setup_flow_control().
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* All revisions other than 5704 A0/A1 need the SERDES_CFG
         * workaround writes below.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: if the hardware autoneg enable bit
                 * (bit 31) is on, turn the engine off.
                 */
                if (sg_dig_ctrl & (1 << 31)) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Per-port magic pre-emphasis value. */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }
                        tw32_f(SG_DIG_CTRL, 0x01388400);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = 0x81388400;

        /* Pause capability */
        expected_sg_dig_ctrl |= (1 << 11);

        /* Asymmetric pause */
        expected_sg_dig_ctrl |= (1 << 12);

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Engine not yet configured as desired: (re)start it,
                 * pulsing the soft-reset bit (bit 30).
                 */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                int i;

                /* Give time to negotiate (~200ms) */
                for (i = 0; i < 40000; i++) {
                        sg_dig_status = tr32(SG_DIG_STATUS);
                        if (sg_dig_status & (0x3))
                                break;
                        udelay(5);
                }
                mac_status = tr32(MAC_STATUS);

                /* Bit 1 of SG_DIG_STATUS = autoneg complete. */
                if ((sg_dig_status & (1 << 1)) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv, remote_adv;

                        /* Translate the partner pause bits (19/20)
                         * into MII-style LPA flags for flow control.
                         */
                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (sg_dig_status & (1 << 19))
                                remote_adv |= LPA_PAUSE_CAP;
                        if (sg_dig_status & (1 << 20))
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
                } else if (!(sg_dig_status & (1 << 1))) {
                        /* Autoneg did not complete.  Skip the fallback
                         * once right after (re)initializing the engine.
                         */
                        if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
                                tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                /* Disable the autoneg engine and try
                                 * parallel detection instead.
                                 */
                                tw32_f(SG_DIG_CTRL, 0x01388400);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                }
                        }
                }
        }

out:
        return current_link_up;
}
2314
/* Establish a fiber link without the hardware autoneg engine: either
 * by running the software clause-37 state machine (fiber_autoneg) or,
 * when autoneg is disabled, by simply forcing the link up.
 *
 * @tp:         device state.
 * @mac_status: a recent snapshot of the MAC_STATUS register.
 *
 * Returns 1 when a link was established, 0 otherwise.  Updates
 * TG3_FLAG_GOT_SERDES_FLOWCTL and configures flow control on success.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* Without PCS sync there is no carrier; nothing to negotiate. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
                tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
                goto out;
        }

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 flags;
                int i;

                if (fiber_autoneg(tp, &flags)) {
                        u32 local_adv, remote_adv;

                        /* Translate the MR_* partner-pause flags into
                         * MII-style LPA bits for flow control setup.
                         */
                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (flags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (flags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
                        current_link_up = 1;
                }
                /* Clear the write-one-to-clear sync/config change bits
                 * until they stop re-asserting (bounded at 30 tries).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed, but the link is synced and quiet:
                 * treat it as up anyway (parallel detection).
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                /* Forcing 1000FD link up. */
                current_link_up = 1;
                tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);
        }

out:
        return current_link_up;
}
2372
/* Configure and poll a fiber (TBI) link.
 *
 * Chooses between the hardware autoneg engine and the software
 * implementation, then reconciles the resulting link state with the
 * net stack (carrier on/off, LED control, link report).
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the previous settings so we can tell at the end
	 * whether a link report is needed.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path (no hw autoneg): if the carrier is up and the MAC
	 * still reports a clean synced link with no config code words
	 * arriving, just ack the change bits and keep the link as is.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the link-change bit in the status block so a stale
	 * event is not processed again.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending SYNC/CFG change events until the hardware stops
	 * reasserting them (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Pulse SEND_CONFIGS to nudge the link partner
			 * into renegotiating.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the link LED
	 * accordingly.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes to the stack; otherwise report
	 * only if speed/duplex/pause configuration changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2486
2487 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2488 {
2489         int err;
2490
2491         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2492                 err = tg3_setup_fiber_phy(tp, force_reset);
2493         } else {
2494                 err = tg3_setup_copper_phy(tp, force_reset);
2495         }
2496
2497         if (tp->link_config.active_speed == SPEED_1000 &&
2498             tp->link_config.active_duplex == DUPLEX_HALF)
2499                 tw32(MAC_TX_LENGTHS,
2500                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2501                       (6 << TX_LENGTHS_IPG_SHIFT) |
2502                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2503         else
2504                 tw32(MAC_TX_LENGTHS,
2505                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2506                       (6 << TX_LENGTHS_IPG_SHIFT) |
2507                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2508
2509         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2510                 if (netif_carrier_ok(tp->dev)) {
2511                         tw32(HOSTCC_STAT_COAL_TICKS,
2512                              tp->coal.stats_block_coalesce_usecs);
2513                 } else {
2514                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2515                 }
2516         }
2517
2518         return err;
2519 }
2520
2521 /* Tigon3 never reports partial packet sends.  So we do not
2522  * need special logic to handle SKBs that have not had all
2523  * of their frags sent yet, like SunGEM does.
2524  */
2525 static void tg3_tx(struct tg3 *tp)
2526 {
2527         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2528         u32 sw_idx = tp->tx_cons;
2529
2530         while (sw_idx != hw_idx) {
2531                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2532                 struct sk_buff *skb = ri->skb;
2533                 int i;
2534
2535                 if (unlikely(skb == NULL))
2536                         BUG();
2537
2538                 pci_unmap_single(tp->pdev,
2539                                  pci_unmap_addr(ri, mapping),
2540                                  skb_headlen(skb),
2541                                  PCI_DMA_TODEVICE);
2542
2543                 ri->skb = NULL;
2544
2545                 sw_idx = NEXT_TX(sw_idx);
2546
2547                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2548                         if (unlikely(sw_idx == hw_idx))
2549                                 BUG();
2550
2551                         ri = &tp->tx_buffers[sw_idx];
2552                         if (unlikely(ri->skb != NULL))
2553                                 BUG();
2554
2555                         pci_unmap_page(tp->pdev,
2556                                        pci_unmap_addr(ri, mapping),
2557                                        skb_shinfo(skb)->frags[i].size,
2558                                        PCI_DMA_TODEVICE);
2559
2560                         sw_idx = NEXT_TX(sw_idx);
2561                 }
2562
2563                 dev_kfree_skb_irq(skb);
2564         }
2565
2566         tp->tx_cons = sw_idx;
2567
2568         if (netif_queue_stopped(tp->dev) &&
2569             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2570                 netif_wake_queue(tp->dev);
2571 }
2572
2573 /* Returns size of skb allocated or < 0 on error.
2574  *
2575  * We only need to fill in the address because the other members
2576  * of the RX descriptor are invariant, see tg3_init_rings.
2577  *
2578  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2579  * posting buffers we only dirty the first cache line of the RX
2580  * descriptor (containing the address).  Whereas for the RX status
2581  * buffers the cpu only reads the last cacheline of the RX descriptor
2582  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2583  */
2584 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2585                             int src_idx, u32 dest_idx_unmasked)
2586 {
2587         struct tg3_rx_buffer_desc *desc;
2588         struct ring_info *map, *src_map;
2589         struct sk_buff *skb;
2590         dma_addr_t mapping;
2591         int skb_size, dest_idx;
2592
2593         src_map = NULL;
2594         switch (opaque_key) {
2595         case RXD_OPAQUE_RING_STD:
2596                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2597                 desc = &tp->rx_std[dest_idx];
2598                 map = &tp->rx_std_buffers[dest_idx];
2599                 if (src_idx >= 0)
2600                         src_map = &tp->rx_std_buffers[src_idx];
2601                 skb_size = RX_PKT_BUF_SZ;
2602                 break;
2603
2604         case RXD_OPAQUE_RING_JUMBO:
2605                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2606                 desc = &tp->rx_jumbo[dest_idx];
2607                 map = &tp->rx_jumbo_buffers[dest_idx];
2608                 if (src_idx >= 0)
2609                         src_map = &tp->rx_jumbo_buffers[src_idx];
2610                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2611                 break;
2612
2613         default:
2614                 return -EINVAL;
2615         };
2616
2617         /* Do not overwrite any of the map or rp information
2618          * until we are sure we can commit to a new buffer.
2619          *
2620          * Callers depend upon this behavior and assume that
2621          * we leave everything unchanged if we fail.
2622          */
2623         skb = dev_alloc_skb(skb_size);
2624         if (skb == NULL)
2625                 return -ENOMEM;
2626
2627         skb->dev = tp->dev;
2628         skb_reserve(skb, tp->rx_offset);
2629
2630         mapping = pci_map_single(tp->pdev, skb->data,
2631                                  skb_size - tp->rx_offset,
2632                                  PCI_DMA_FROMDEVICE);
2633
2634         map->skb = skb;
2635         pci_unmap_addr_set(map, mapping, mapping);
2636
2637         if (src_map != NULL)
2638                 src_map->skb = NULL;
2639
2640         desc->addr_hi = ((u64)mapping >> 32);
2641         desc->addr_lo = ((u64)mapping & 0xffffffff);
2642
2643         return skb_size;
2644 }
2645
2646 /* We only need to move over in the address because the other
2647  * members of the RX descriptor are invariant.  See notes above
2648  * tg3_alloc_rx_skb for full details.
2649  */
2650 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2651                            int src_idx, u32 dest_idx_unmasked)
2652 {
2653         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2654         struct ring_info *src_map, *dest_map;
2655         int dest_idx;
2656
2657         switch (opaque_key) {
2658         case RXD_OPAQUE_RING_STD:
2659                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2660                 dest_desc = &tp->rx_std[dest_idx];
2661                 dest_map = &tp->rx_std_buffers[dest_idx];
2662                 src_desc = &tp->rx_std[src_idx];
2663                 src_map = &tp->rx_std_buffers[src_idx];
2664                 break;
2665
2666         case RXD_OPAQUE_RING_JUMBO:
2667                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2668                 dest_desc = &tp->rx_jumbo[dest_idx];
2669                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2670                 src_desc = &tp->rx_jumbo[src_idx];
2671                 src_map = &tp->rx_jumbo_buffers[src_idx];
2672                 break;
2673
2674         default:
2675                 return;
2676         };
2677
2678         dest_map->skb = src_map->skb;
2679         pci_unmap_addr_set(dest_map, mapping,
2680                            pci_unmap_addr(src_map, mapping));
2681         dest_desc->addr_hi = src_desc->addr_hi;
2682         dest_desc->addr_lo = src_desc->addr_lo;
2683
2684         src_map->skb = NULL;
2685 }
2686
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame up the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2693
2694 /* The RX ring scheme is composed of multiple rings which post fresh
2695  * buffers to the chip, and one special ring the chip uses to report
2696  * status back to the host.
2697  *
2698  * The special ring reports the status of received packets to the
2699  * host.  The chip does not write into the original descriptor the
2700  * RX buffer was obtained from.  The chip simply takes the original
2701  * descriptor as provided by the host, updates the status and length
2702  * field, then writes this into the next status ring entry.
2703  *
2704  * Each ring the host uses to post buffers to the chip is described
2705  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
2706  * it is first placed into the on-chip ram.  When the packet's length
2707  * is known, it walks down the TG3_BDINFO entries to select the ring.
2708  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2709  * which is within the range of the new packet's length is chosen.
2710  *
2711  * The "separate ring for rx status" scheme may sound queer, but it makes
2712  * sense from a cache coherency perspective.  If only the host writes
2713  * to the buffer post rings, and only the chip writes to the rx status
2714  * rings, then cache lines never move beyond shared-modified state.
2715  * If both the host and chip were to write into the same ring, cache line
2716  * eviction could occur since both entities want it in an exclusive state.
2717  */
/* Service the RX return ring within the NAPI budget.
 *
 * Returns the number of packets passed up the stack.  Packets at or
 * below RX_COPY_THRESHOLD are copied into a fresh small skb and the
 * original ring buffer is recycled; larger packets are handed up
 * directly after a replacement buffer has been posted.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring
		 * (std or jumbo) the buffer came from and its index
		 * within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Recycle and drop errored frames, except the
		 * odd-nibble MII condition which is not fatal.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Post a replacement buffer first; only then is
			 * it safe to hand the original skb upstream.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it out and recycle the
			 * original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip both
		 * computed it and it verified as 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
2864
/* NAPI poll callback.
 *
 * Handles link-change events, reaps TX completions, and processes
 * RX packets within the NAPI budget.  Returns 0 (after re-enabling
 * chip interrupts) when all work is done, 1 to be polled again.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	int done;

	spin_lock_irqsave(&tp->lock, flags);

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before acting on it. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			tg3_setup_phy(tp, 0);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
		spin_unlock(&tp->tx_lock);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* In tagged-status mode, record the latest tag; it is written
	 * back to the interrupt mailbox when interrupts re-enable.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		tp->last_tag = sblk->status_tag;
	rmb();

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		spin_lock_irqsave(&tp->lock, flags);
		__netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return (done ? 0 : 1);
}
2926
2927 /* MSI ISR - No need to check for interrupt sharing and no need to
2928  * flush status block and interrupt mailbox. PCI ordering rules
2929  * guarantee that MSI will arrive after the status block.
2930  */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tp->last_tag = sblk->status_tag;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	else {
		/* No work, re-enable interrupts.  Writing the last
		 * seen tag (shifted into bits 31:24) acknowledges all
		 * events up to that tag.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     tp->last_tag << 24);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	/* MSI is never shared, so the interrupt is always ours. */
	return IRQ_RETVAL(1);
}
2962
/* INTx ISR for chips running in non-tagged status mode. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
3008
/* INTx ISR for chips running in tagged status mode: identical to
 * tg3_interrupt() except that the status tag is latched and written
 * back (shifted into bits 31:24) when interrupts are re-enabled.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		tp->last_tag = sblk->status_tag;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				     tp->last_tag << 24);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
3055
3056 /* ISR for interrupt test */
3057 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3058                 struct pt_regs *regs)
3059 {
3060         struct net_device *dev = dev_id;
3061         struct tg3 *tp = netdev_priv(dev);
3062         struct tg3_hw_status *sblk = tp->hw_status;
3063
3064         if (sblk->status & SD_STATUS_UPDATED) {
3065                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3066                              0x00000001);
3067                 return IRQ_RETVAL(1);
3068         }
3069         return IRQ_RETVAL(0);
3070 }
3071
3072 static int tg3_init_hw(struct tg3 *);
3073 static int tg3_halt(struct tg3 *, int);
3074
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: service the device as if an interrupt had fired,
 * for use when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3083
/* Work-queue handler: fully halt and reinitialize the chip in
 * process context (scheduled from tg3_tx_timeout via tp->reset_task).
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Sample and clear the restart-timer request under the locks. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp, 0);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
}
3108
/* net_device watchdog callback: the stack detected a stuck TX queue.
 * Defer the actual chip reset to process context via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3118
3119 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3120
3121 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3122                                        u32 guilty_entry, int guilty_len,
3123                                        u32 last_plus_one, u32 *start, u32 mss)
3124 {
3125         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3126         dma_addr_t new_addr;
3127         u32 entry = *start;
3128         int i;
3129
3130         if (!new_skb) {
3131                 dev_kfree_skb(skb);
3132                 return -1;
3133         }
3134
3135         /* New SKB is guaranteed to be linear. */
3136         entry = *start;
3137         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3138                                   PCI_DMA_TODEVICE);
3139         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3140                     (skb->ip_summed == CHECKSUM_HW) ?
3141                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3142         *start = NEXT_TX(entry);
3143
3144         /* Now clean up the sw ring entries. */
3145         i = 0;
3146         while (entry != last_plus_one) {
3147                 int len;
3148
3149                 if (i == 0)
3150                         len = skb_headlen(skb);
3151                 else
3152                         len = skb_shinfo(skb)->frags[i-1].size;
3153                 pci_unmap_single(tp->pdev,
3154                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3155                                  len, PCI_DMA_TODEVICE);
3156                 if (i == 0) {
3157                         tp->tx_buffers[entry].skb = new_skb;
3158                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3159                 } else {
3160                         tp->tx_buffers[entry].skb = NULL;
3161                 }
3162                 entry = NEXT_TX(entry);
3163                 i++;
3164         }
3165
3166         dev_kfree_skb(skb);
3167
3168         return 0;
3169 }
3170
3171 static void tg3_set_txd(struct tg3 *tp, int entry,
3172                         dma_addr_t mapping, int len, u32 flags,
3173                         u32 mss_and_is_end)
3174 {
3175         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3176         int is_end = (mss_and_is_end & 0x1);
3177         u32 mss = (mss_and_is_end >> 1);
3178         u32 vlan_tag = 0;
3179
3180         if (is_end)
3181                 flags |= TXD_FLAG_END;
3182         if (flags & TXD_FLAG_VLAN) {
3183                 vlan_tag = flags >> 16;
3184                 flags &= 0xffff;
3185         }
3186         vlan_tag |= (mss << TXD_MSS_SHIFT);
3187
3188         txd->addr_hi = ((u64) mapping >> 32);
3189         txd->addr_lo = ((u64) mapping & 0xffffffff);
3190         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3191         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3192 }
3193
3194 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3195 {
3196         u32 base = (u32) mapping & 0xffffffff;
3197
3198         return ((base > 0xffffdcc0) &&
3199                 (base + len + 8 < base));
3200 }
3201
3202 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3203 {
3204         struct tg3 *tp = netdev_priv(dev);
3205         dma_addr_t mapping;
3206         unsigned int i;
3207         u32 len, entry, base_flags, mss;
3208         int would_hit_hwbug;
3209         unsigned long flags;
3210
3211         len = skb_headlen(skb);
3212
3213         /* No BH disabling for tx_lock here.  We are running in BH disabled
3214          * context and TX reclaim runs via tp->poll inside of a software
3215          * interrupt.  Rejoice!
3216          *
3217          * Actually, things are not so simple.  If we are to take a hw
3218          * IRQ here, we can deadlock, consider:
3219          *
3220          *       CPU1           CPU2
3221          *   tg3_start_xmit
3222          *   take tp->tx_lock
3223          *                      tg3_timer
3224          *                      take tp->lock
3225          *   tg3_interrupt
3226          *   spin on tp->lock
3227          *                      spin on tp->tx_lock
3228          *
3229          * So we really do need to disable interrupts when taking
3230          * tx_lock here.
3231          */
3232         local_irq_save(flags);
3233         if (!spin_trylock(&tp->tx_lock)) { 
3234                 local_irq_restore(flags);
3235                 return NETDEV_TX_LOCKED; 
3236         } 
3237
3238         /* This is a hard error, log it. */
3239         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3240                 netif_stop_queue(dev);
3241                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3242                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3243                        dev->name);