[linux-3.10.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39
40 #include <net/checksum.h>
41
42 #include <asm/system.h>
43 #include <asm/io.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46
47 #ifdef CONFIG_SPARC64
48 #include <asm/idprom.h>
49 #include <asm/oplib.h>
50 #include <asm/pbm.h>
51 #endif
52
53 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
54 #define TG3_VLAN_TAG_USED 1
55 #else
56 #define TG3_VLAN_TAG_USED 0
57 #endif
58
59 #ifdef NETIF_F_TSO
60 #define TG3_TSO_SUPPORT 1
61 #else
62 #define TG3_TSO_SUPPORT 0
63 #endif
64
65 #include "tg3.h"
66
67 #define DRV_MODULE_NAME         "tg3"
68 #define PFX DRV_MODULE_NAME     ": "
69 #define DRV_MODULE_VERSION      "3.33"
70 #define DRV_MODULE_RELDATE      "July 5, 2005"
71
72 #define TG3_DEF_MAC_MODE        0
73 #define TG3_DEF_RX_MODE         0
74 #define TG3_DEF_TX_MODE         0
75 #define TG3_DEF_MSG_ENABLE        \
76         (NETIF_MSG_DRV          | \
77          NETIF_MSG_PROBE        | \
78          NETIF_MSG_LINK         | \
79          NETIF_MSG_TIMER        | \
80          NETIF_MSG_IFDOWN       | \
81          NETIF_MSG_IFUP         | \
82          NETIF_MSG_RX_ERR       | \
83          NETIF_MSG_TX_ERR)
84
85 /* length of time before we decide the hardware is borked,
86  * and dev->tx_timeout() should be called to fix the problem
87  */
88 #define TG3_TX_TIMEOUT                  (5 * HZ)
89
90 /* hardware minimum and maximum for a single frame's data payload */
91 #define TG3_MIN_MTU                     60
92 #define TG3_MAX_MTU(tp) \
93         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
94
95 /* These numbers seem to be hard coded in the NIC firmware somehow.
96  * You can't change the ring sizes, but you can change where you place
97  * them in the NIC onboard memory.
98  */
99 #define TG3_RX_RING_SIZE                512
100 #define TG3_DEF_RX_RING_PENDING         200
101 #define TG3_RX_JUMBO_RING_SIZE          256
102 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
103
104 /* Do not place this n-ring entries value into the tp struct itself,
105  * we really want to expose these constants to GCC so that modulo et
106  * al.  operations are done with shifts and masks instead of with
107  * hw multiply/modulo instructions.  Another solution would be to
108  * replace things like '% foo' with '& (foo - 1)'.
109  */
110 #define TG3_RX_RCB_RING_SIZE(tp)        \
111         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
112
113 #define TG3_TX_RING_SIZE                512
114 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
115
116 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_RING_SIZE)
118 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_JUMBO_RING_SIZE)
120 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
121                                    TG3_RX_RCB_RING_SIZE(tp))
122 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
123                                  TG3_TX_RING_SIZE)
124 #define TX_RING_GAP(TP) \
125         (TG3_TX_RING_SIZE - (TP)->tx_pending)
126 #define TX_BUFFS_AVAIL(TP)                                              \
127         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
128           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
129           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { 0, }
241 };
242
243 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
244
245 static struct {
246         const char string[ETH_GSTRING_LEN];
247 } ethtool_stats_keys[TG3_NUM_STATS] = {
248         { "rx_octets" },
249         { "rx_fragments" },
250         { "rx_ucast_packets" },
251         { "rx_mcast_packets" },
252         { "rx_bcast_packets" },
253         { "rx_fcs_errors" },
254         { "rx_align_errors" },
255         { "rx_xon_pause_rcvd" },
256         { "rx_xoff_pause_rcvd" },
257         { "rx_mac_ctrl_rcvd" },
258         { "rx_xoff_entered" },
259         { "rx_frame_too_long_errors" },
260         { "rx_jabbers" },
261         { "rx_undersize_packets" },
262         { "rx_in_length_errors" },
263         { "rx_out_length_errors" },
264         { "rx_64_or_less_octet_packets" },
265         { "rx_65_to_127_octet_packets" },
266         { "rx_128_to_255_octet_packets" },
267         { "rx_256_to_511_octet_packets" },
268         { "rx_512_to_1023_octet_packets" },
269         { "rx_1024_to_1522_octet_packets" },
270         { "rx_1523_to_2047_octet_packets" },
271         { "rx_2048_to_4095_octet_packets" },
272         { "rx_4096_to_8191_octet_packets" },
273         { "rx_8192_to_9022_octet_packets" },
274
275         { "tx_octets" },
276         { "tx_collisions" },
277
278         { "tx_xon_sent" },
279         { "tx_xoff_sent" },
280         { "tx_flow_control" },
281         { "tx_mac_errors" },
282         { "tx_single_collisions" },
283         { "tx_mult_collisions" },
284         { "tx_deferred" },
285         { "tx_excessive_collisions" },
286         { "tx_late_collisions" },
287         { "tx_collide_2times" },
288         { "tx_collide_3times" },
289         { "tx_collide_4times" },
290         { "tx_collide_5times" },
291         { "tx_collide_6times" },
292         { "tx_collide_7times" },
293         { "tx_collide_8times" },
294         { "tx_collide_9times" },
295         { "tx_collide_10times" },
296         { "tx_collide_11times" },
297         { "tx_collide_12times" },
298         { "tx_collide_13times" },
299         { "tx_collide_14times" },
300         { "tx_collide_15times" },
301         { "tx_ucast_packets" },
302         { "tx_mcast_packets" },
303         { "tx_bcast_packets" },
304         { "tx_carrier_sense_errors" },
305         { "tx_discards" },
306         { "tx_errors" },
307
308         { "dma_writeq_full" },
309         { "dma_write_prioq_full" },
310         { "rxbds_empty" },
311         { "rx_discards" },
312         { "rx_errors" },
313         { "rx_threshold_hit" },
314
315         { "dma_readq_full" },
316         { "dma_read_prioq_full" },
317         { "tx_comp_queue_full" },
318
319         { "ring_set_send_prod_index" },
320         { "ring_status_update" },
321         { "nic_irqs" },
322         { "nic_avoided_irqs" },
323         { "nic_tx_threshold_hit" }
324 };
325
326 static struct {
327         const char string[ETH_GSTRING_LEN];
328 } ethtool_test_keys[TG3_NUM_TEST] = {
329         { "nvram test     (online) " },
330         { "link test      (online) " },
331         { "register test  (offline)" },
332         { "memory test    (offline)" },
333         { "loopback test  (offline)" },
334         { "interrupt test (offline)" },
335 };
336
337 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
338 {
339         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
340                 spin_lock_bh(&tp->indirect_lock);
341                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
342                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
343                 spin_unlock_bh(&tp->indirect_lock);
344         } else {
345                 writel(val, tp->regs + off);
346                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
347                         readl(tp->regs + off);
348         }
349 }
350
351 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
352 {
353         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
354                 spin_lock_bh(&tp->indirect_lock);
355                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
356                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
357                 spin_unlock_bh(&tp->indirect_lock);
358         } else {
359                 void __iomem *dest = tp->regs + off;
360                 writel(val, dest);
361                 readl(dest);    /* always flush PCI write */
362         }
363 }
364
365 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
366 {
367         void __iomem *mbox = tp->regs + off;
368         writel(val, mbox);
369         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
370                 readl(mbox);
371 }
372
373 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
374 {
375         void __iomem *mbox = tp->regs + off;
376         writel(val, mbox);
377         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
378                 writel(val, mbox);
379         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
380                 readl(mbox);
381 }
382
383 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
384 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
385 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
386
387 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
388 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
389 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
390 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
391 #define tr32(reg)               readl(tp->regs + (reg))
392 #define tr16(reg)               readw(tp->regs + (reg))
393 #define tr8(reg)                readb(tp->regs + (reg))
394
395 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
396 {
397         spin_lock_bh(&tp->indirect_lock);
398         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
399         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
400
401         /* Always leave this as zero. */
402         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
403         spin_unlock_bh(&tp->indirect_lock);
404 }
405
406 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
407 {
408         spin_lock_bh(&tp->indirect_lock);
409         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
410         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
411
412         /* Always leave this as zero. */
413         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
414         spin_unlock_bh(&tp->indirect_lock);
415 }
416
417 static void tg3_disable_ints(struct tg3 *tp)
418 {
419         tw32(TG3PCI_MISC_HOST_CTRL,
420              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
421         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
422         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
423 }
424
425 static inline void tg3_cond_int(struct tg3 *tp)
426 {
427         if (tp->hw_status->status & SD_STATUS_UPDATED)
428                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
429 }
430
431 static void tg3_enable_ints(struct tg3 *tp)
432 {
433         tp->irq_sync = 0;
434         wmb();
435
436         tw32(TG3PCI_MISC_HOST_CTRL,
437              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
438         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
439                      (tp->last_tag << 24));
440         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
441         tg3_cond_int(tp);
442 }
443
444 static inline unsigned int tg3_has_work(struct tg3 *tp)
445 {
446         struct tg3_hw_status *sblk = tp->hw_status;
447         unsigned int work_exists = 0;
448
449         /* check for phy events */
450         if (!(tp->tg3_flags &
451               (TG3_FLAG_USE_LINKCHG_REG |
452                TG3_FLAG_POLL_SERDES))) {
453                 if (sblk->status & SD_STATUS_LINK_CHG)
454                         work_exists = 1;
455         }
456         /* check for RX/TX work to do */
457         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
458             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
459                 work_exists = 1;
460
461         return work_exists;
462 }
463
464 /* tg3_restart_ints
465  *  similar to tg3_enable_ints, but it accurately determines whether there
466  *  is new work pending and can return without flushing the PIO write
467  *  which reenables interrupts 
468  */
469 static void tg3_restart_ints(struct tg3 *tp)
470 {
471         tw32(TG3PCI_MISC_HOST_CTRL,
472                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
473         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
474                      tp->last_tag << 24);
475         mmiowb();
476
477         /* When doing tagged status, this work check is unnecessary.
478          * The last_tag we write above tells the chip which piece of
479          * work we've completed.
480          */
481         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
482             tg3_has_work(tp))
483                 tw32(HOSTCC_MODE, tp->coalesce_mode |
484                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
485 }
486
487 static inline void tg3_netif_stop(struct tg3 *tp)
488 {
489         tp->dev->trans_start = jiffies; /* prevent tx timeout */
490         netif_poll_disable(tp->dev);
491         netif_tx_disable(tp->dev);
492 }
493
494 static inline void tg3_netif_start(struct tg3 *tp)
495 {
496         netif_wake_queue(tp->dev);
497         /* NOTE: unconditional netif_wake_queue is only appropriate
498          * so long as all callers are assured to have free tx slots
499          * (such as after tg3_init_hw)
500          */
501         netif_poll_enable(tp->dev);
502         tp->hw_status->status |= SD_STATUS_UPDATED;
503         tg3_enable_ints(tp);
504 }
505
506 static void tg3_switch_clocks(struct tg3 *tp)
507 {
508         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
509         u32 orig_clock_ctrl;
510
511         orig_clock_ctrl = clock_ctrl;
512         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
513                        CLOCK_CTRL_CLKRUN_OENABLE |
514                        0x1f);
515         tp->pci_clock_ctrl = clock_ctrl;
516
517         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
518                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
519                         tw32_f(TG3PCI_CLOCK_CTRL,
520                                clock_ctrl | CLOCK_CTRL_625_CORE);
521                         udelay(40);
522                 }
523         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
524                 tw32_f(TG3PCI_CLOCK_CTRL,
525                      clock_ctrl |
526                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
527                 udelay(40);
528                 tw32_f(TG3PCI_CLOCK_CTRL,
529                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
530                 udelay(40);
531         }
532         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
533         udelay(40);
534 }
535
536 #define PHY_BUSY_LOOPS  5000
537
538 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
539 {
540         u32 frame_val;
541         unsigned int loops;
542         int ret;
543
544         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
545                 tw32_f(MAC_MI_MODE,
546                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
547                 udelay(80);
548         }
549
550         *val = 0x0;
551
552         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
553                       MI_COM_PHY_ADDR_MASK);
554         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
555                       MI_COM_REG_ADDR_MASK);
556         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
557         
558         tw32_f(MAC_MI_COM, frame_val);
559
560         loops = PHY_BUSY_LOOPS;
561         while (loops != 0) {
562                 udelay(10);
563                 frame_val = tr32(MAC_MI_COM);
564
565                 if ((frame_val & MI_COM_BUSY) == 0) {
566                         udelay(5);
567                         frame_val = tr32(MAC_MI_COM);
568                         break;
569                 }
570                 loops -= 1;
571         }
572
573         ret = -EBUSY;
574         if (loops != 0) {
575                 *val = frame_val & MI_COM_DATA_MASK;
576                 ret = 0;
577         }
578
579         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
580                 tw32_f(MAC_MI_MODE, tp->mi_mode);
581                 udelay(80);
582         }
583
584         return ret;
585 }
586
587 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
588 {
589         u32 frame_val;
590         unsigned int loops;
591         int ret;
592
593         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
594                 tw32_f(MAC_MI_MODE,
595                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
596                 udelay(80);
597         }
598
599         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
600                       MI_COM_PHY_ADDR_MASK);
601         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
602                       MI_COM_REG_ADDR_MASK);
603         frame_val |= (val & MI_COM_DATA_MASK);
604         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
605         
606         tw32_f(MAC_MI_COM, frame_val);
607
608         loops = PHY_BUSY_LOOPS;
609         while (loops != 0) {
610                 udelay(10);
611                 frame_val = tr32(MAC_MI_COM);
612                 if ((frame_val & MI_COM_BUSY) == 0) {
613                         udelay(5);
614                         frame_val = tr32(MAC_MI_COM);
615                         break;
616                 }
617                 loops -= 1;
618         }
619
620         ret = -EBUSY;
621         if (loops != 0)
622                 ret = 0;
623
624         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
625                 tw32_f(MAC_MI_MODE, tp->mi_mode);
626                 udelay(80);
627         }
628
629         return ret;
630 }
631
632 static void tg3_phy_set_wirespeed(struct tg3 *tp)
633 {
634         u32 val;
635
636         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
637                 return;
638
639         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
640             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
641                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
642                              (val | (1 << 15) | (1 << 4)));
643 }
644
645 static int tg3_bmcr_reset(struct tg3 *tp)
646 {
647         u32 phy_control;
648         int limit, err;
649
650         /* OK, reset it, and poll the BMCR_RESET bit until it
651          * clears or we time out.
652          */
653         phy_control = BMCR_RESET;
654         err = tg3_writephy(tp, MII_BMCR, phy_control);
655         if (err != 0)
656                 return -EBUSY;
657
658         limit = 5000;
659         while (limit--) {
660                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
661                 if (err != 0)
662                         return -EBUSY;
663
664                 if ((phy_control & BMCR_RESET) == 0) {
665                         udelay(40);
666                         break;
667                 }
668                 udelay(10);
669         }
670         if (limit <= 0)
671                 return -EBUSY;
672
673         return 0;
674 }
675
676 static int tg3_wait_macro_done(struct tg3 *tp)
677 {
678         int limit = 100;
679
680         while (limit--) {
681                 u32 tmp32;
682
683                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
684                         if ((tmp32 & 0x1000) == 0)
685                                 break;
686                 }
687         }
688         if (limit <= 0)
689                 return -EBUSY;
690
691         return 0;
692 }
693
694 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
695 {
696         static const u32 test_pat[4][6] = {
697         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
698         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
699         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
700         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
701         };
702         int chan;
703
704         for (chan = 0; chan < 4; chan++) {
705                 int i;
706
707                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
708                              (chan * 0x2000) | 0x0200);
709                 tg3_writephy(tp, 0x16, 0x0002);
710
711                 for (i = 0; i < 6; i++)
712                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
713                                      test_pat[chan][i]);
714
715                 tg3_writephy(tp, 0x16, 0x0202);
716                 if (tg3_wait_macro_done(tp)) {
717                         *resetp = 1;
718                         return -EBUSY;
719                 }
720
721                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722                              (chan * 0x2000) | 0x0200);
723                 tg3_writephy(tp, 0x16, 0x0082);
724                 if (tg3_wait_macro_done(tp)) {
725                         *resetp = 1;
726                         return -EBUSY;
727                 }
728
729                 tg3_writephy(tp, 0x16, 0x0802);
730                 if (tg3_wait_macro_done(tp)) {
731                         *resetp = 1;
732                         return -EBUSY;
733                 }
734
735                 for (i = 0; i < 6; i += 2) {
736                         u32 low, high;
737
738                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
739                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
740                             tg3_wait_macro_done(tp)) {
741                                 *resetp = 1;
742                                 return -EBUSY;
743                         }
744                         low &= 0x7fff;
745                         high &= 0x000f;
746                         if (low != test_pat[chan][i] ||
747                             high != test_pat[chan][i+1]) {
748                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
749                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
750                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
751
752                                 return -EBUSY;
753                         }
754                 }
755         }
756
757         return 0;
758 }
759
760 static int tg3_phy_reset_chanpat(struct tg3 *tp)
761 {
762         int chan;
763
764         for (chan = 0; chan < 4; chan++) {
765                 int i;
766
767                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
768                              (chan * 0x2000) | 0x0200);
769                 tg3_writephy(tp, 0x16, 0x0002);
770                 for (i = 0; i < 6; i++)
771                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
772                 tg3_writephy(tp, 0x16, 0x0202);
773                 if (tg3_wait_macro_done(tp))
774                         return -EBUSY;
775         }
776
777         return 0;
778 }
779
/* Scripted PHY reset workaround for 5703/5704/5705 chips.
 *
 * Repeatedly (up to 10 tries) resets the PHY, forces 1000/full master
 * mode, and runs the Broadcom test-pattern check; once the pattern
 * verifies, restores channel settings and the saved MII_TG3_CTRL value.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every loop iteration,
		 * phy9_orig is used uninitialized in the restore write
		 * below — confirm against later upstream tg3 fixes.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): execution falls through here even when all
	 * retries failed; the test-pattern error in 'err' is discarded
	 * by the assignment below.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access (undo the 0x8005/0x0800 write). */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the MII_TG3_CTRL value saved before forcing master mode. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
855
856 /* This will reset the tigon3 PHY if there is no valid
857  * link unless the FORCE argument is non-zero.
858  */
859 static int tg3_phy_reset(struct tg3 *tp)
860 {
861         u32 phy_status;
862         int err;
863
864         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
865         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
866         if (err != 0)
867                 return -EBUSY;
868
869         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
870             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
871             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
872                 err = tg3_phy_reset_5703_4_5(tp);
873                 if (err)
874                         return err;
875                 goto out;
876         }
877
878         err = tg3_bmcr_reset(tp);
879         if (err)
880                 return err;
881
882 out:
883         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
884                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
885                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
886                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
887                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
888                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
889                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
890         }
891         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
892                 tg3_writephy(tp, 0x1c, 0x8d68);
893                 tg3_writephy(tp, 0x1c, 0x8d68);
894         }
895         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
896                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
897                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
898                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
899                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
900                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
901                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
902                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
904         }
905         /* Set Extended packet length bit (bit 14) on all chips that */
906         /* support jumbo frames */
907         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
908                 /* Cannot do read-modify-write on 5401 */
909                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
910         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
911                 u32 phy_reg;
912
913                 /* Set bit 14 with read-modify-write to preserve other bits */
914                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
915                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
916                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
917         }
918
919         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
920          * jumbo frames transmission.
921          */
922         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
923                 u32 phy_reg;
924
925                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
926                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
927                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
928         }
929
930         tg3_phy_set_wirespeed(tp);
931         return 0;
932 }
933
934 static void tg3_frob_aux_power(struct tg3 *tp)
935 {
936         struct tg3 *tp_peer = tp;
937
938         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
939                 return;
940
941         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
942                 tp_peer = pci_get_drvdata(tp->pdev_peer);
943                 if (!tp_peer)
944                         BUG();
945         }
946
947
948         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
949             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
950                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
951                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
952                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
953                              (GRC_LCLCTRL_GPIO_OE0 |
954                               GRC_LCLCTRL_GPIO_OE1 |
955                               GRC_LCLCTRL_GPIO_OE2 |
956                               GRC_LCLCTRL_GPIO_OUTPUT0 |
957                               GRC_LCLCTRL_GPIO_OUTPUT1));
958                         udelay(100);
959                 } else {
960                         u32 no_gpio2;
961                         u32 grc_local_ctrl;
962
963                         if (tp_peer != tp &&
964                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
965                                 return;
966
967                         /* On 5753 and variants, GPIO2 cannot be used. */
968                         no_gpio2 = tp->nic_sram_data_cfg &
969                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
970
971                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
972                                          GRC_LCLCTRL_GPIO_OE1 |
973                                          GRC_LCLCTRL_GPIO_OE2 |
974                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
975                                          GRC_LCLCTRL_GPIO_OUTPUT2;
976                         if (no_gpio2) {
977                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
978                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
979                         }
980                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
981                                                 grc_local_ctrl);
982                         udelay(100);
983
984                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
985
986                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
987                                                 grc_local_ctrl);
988                         udelay(100);
989
990                         if (!no_gpio2) {
991                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
992                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
993                                        grc_local_ctrl);
994                                 udelay(100);
995                         }
996                 }
997         } else {
998                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
999                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1000                         if (tp_peer != tp &&
1001                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1002                                 return;
1003
1004                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1005                              (GRC_LCLCTRL_GPIO_OE1 |
1006                               GRC_LCLCTRL_GPIO_OUTPUT1));
1007                         udelay(100);
1008
1009                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1010                              (GRC_LCLCTRL_GPIO_OE1));
1011                         udelay(100);
1012
1013                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1014                              (GRC_LCLCTRL_GPIO_OE1 |
1015                               GRC_LCLCTRL_GPIO_OUTPUT1));
1016                         udelay(100);
1017                 }
1018         }
1019 }
1020
1021 static int tg3_setup_phy(struct tg3 *, int);
1022
1023 #define RESET_KIND_SHUTDOWN     0
1024 #define RESET_KIND_INIT         1
1025 #define RESET_KIND_SUSPEND      2
1026
1027 static void tg3_write_sig_post_reset(struct tg3 *, int);
1028 static int tg3_halt_cpu(struct tg3 *, u32);
1029
/* Move the device into PCI power state D0-D3 ('state' is 0-3).
 *
 * For D0 the PM control register is written and the chip is switched
 * out of Vaux.  For D1-D3 the function enables PME, masks PCI
 * interrupts, drops the copper link to 10/half, programs WOL and
 * clock gating per chip generation, arranges auxiliary power, and
 * writes the new PCI power state last.
 * Returns 0 on success, -EINVAL for an unknown state.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* config-space offset of PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear any pending PME and the current power-state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case 0:
		/* D0: the |= 0 is a deliberate no-op keeping the cases
		 * parallel; write the state and return immediately.
		 */
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
			udelay(100);
		}

		return 0;

	case 1:
		power_control |= 1;
		break;

	case 2:
		power_control |= 2;
		break;

	case 3:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is in a low-power state. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link config so resume can restore it. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHY: renegotiate down to 10/half for low power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Magic packet only if PME can assert from D3cold. */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: which clocks may stop depends on the chip
	 * generation and on whether ASF firmware needs them running.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_f(TG3PCI_CLOCK_CTRL, base_val |
		     CLOCK_CTRL_ALTCLK |
		     CLOCK_CTRL_PWRDOWN_PLL133);
		udelay(40);
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write with a delay between — presumably to
		 * let the first bits settle; TODO confirm against the
		 * Broadcom programming guide.
		 */
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
		udelay(40);

		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
		udelay(40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_f(TG3PCI_CLOCK_CTRL,
					 tp->pci_clock_ctrl | newbits3);
			udelay(40);
		}
	}

	/* Arrange auxiliary power via the GPIO pins. */
	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
			tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
1211
1212 static void tg3_link_report(struct tg3 *tp)
1213 {
1214         if (!netif_carrier_ok(tp->dev)) {
1215                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1216         } else {
1217                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1218                        tp->dev->name,
1219                        (tp->link_config.active_speed == SPEED_1000 ?
1220                         1000 :
1221                         (tp->link_config.active_speed == SPEED_100 ?
1222                          100 : 10)),
1223                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1224                         "full" : "half"));
1225
1226                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1227                        "%s for RX.\n",
1228                        tp->dev->name,
1229                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1230                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1231         }
1232 }
1233
1234 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1235 {
1236         u32 new_tg3_flags = 0;
1237         u32 old_rx_mode = tp->rx_mode;
1238         u32 old_tx_mode = tp->tx_mode;
1239
1240         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1241                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1242                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1243                                 if (remote_adv & LPA_PAUSE_CAP)
1244                                         new_tg3_flags |=
1245                                                 (TG3_FLAG_RX_PAUSE |
1246                                                 TG3_FLAG_TX_PAUSE);
1247                                 else if (remote_adv & LPA_PAUSE_ASYM)
1248                                         new_tg3_flags |=
1249                                                 (TG3_FLAG_RX_PAUSE);
1250                         } else {
1251                                 if (remote_adv & LPA_PAUSE_CAP)
1252                                         new_tg3_flags |=
1253                                                 (TG3_FLAG_RX_PAUSE |
1254                                                 TG3_FLAG_TX_PAUSE);
1255                         }
1256                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1257                         if ((remote_adv & LPA_PAUSE_CAP) &&
1258                         (remote_adv & LPA_PAUSE_ASYM))
1259                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1260                 }
1261
1262                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1263                 tp->tg3_flags |= new_tg3_flags;
1264         } else {
1265                 new_tg3_flags = tp->tg3_flags;
1266         }
1267
1268         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1269                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1270         else
1271                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1272
1273         if (old_rx_mode != tp->rx_mode) {
1274                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1275         }
1276         
1277         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1278                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1279         else
1280                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1281
1282         if (old_tx_mode != tp->tx_mode) {
1283                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1284         }
1285 }
1286
1287 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1288 {
1289         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1290         case MII_TG3_AUX_STAT_10HALF:
1291                 *speed = SPEED_10;
1292                 *duplex = DUPLEX_HALF;
1293                 break;
1294
1295         case MII_TG3_AUX_STAT_10FULL:
1296                 *speed = SPEED_10;
1297                 *duplex = DUPLEX_FULL;
1298                 break;
1299
1300         case MII_TG3_AUX_STAT_100HALF:
1301                 *speed = SPEED_100;
1302                 *duplex = DUPLEX_HALF;
1303                 break;
1304
1305         case MII_TG3_AUX_STAT_100FULL:
1306                 *speed = SPEED_100;
1307                 *duplex = DUPLEX_FULL;
1308                 break;
1309
1310         case MII_TG3_AUX_STAT_1000HALF:
1311                 *speed = SPEED_1000;
1312                 *duplex = DUPLEX_HALF;
1313                 break;
1314
1315         case MII_TG3_AUX_STAT_1000FULL:
1316                 *speed = SPEED_1000;
1317                 *duplex = DUPLEX_FULL;
1318                 break;
1319
1320         default:
1321                 *speed = SPEED_INVALID;
1322                 *duplex = DUPLEX_INVALID;
1323                 break;
1324         };
1325 }
1326
/* Program the copper PHY advertisement registers from tp->link_config
 * and start (or force) link negotiation.
 *
 * Three advertisement cases:
 *  - low-power mode: advertise 10Mb only (plus 100Mb when WOL at
 *    100Mb is required);
 *  - speed == SPEED_INVALID: advertise every mode the chip supports
 *    (gigabit masked off for 10/100-only devices);
 *  - otherwise: advertise exactly the requested speed/duplex.
 * With autoneg disabled and a specific speed set, BMCR is programmed
 * directly; otherwise autonegotiation is restarted.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything. */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 need master mode forced for gigabit. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force link down via loopback and poll BMSR
			 * (up to 1500 x 10us) until link drops before
			 * writing the final BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1465
1466 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1467 {
1468         int err;
1469
1470         /* Turn off tap power management. */
1471         /* Set Extended packet length bit */
1472         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1473
1474         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1475         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1476
1477         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1478         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1479
1480         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1481         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1482
1483         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1484         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1485
1486         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1487         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1488
1489         udelay(40);
1490
1491         return err;
1492 }
1493
1494 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1495 {
1496         u32 adv_reg, all_mask;
1497
1498         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1499                 return 0;
1500
1501         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1502                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1503         if ((adv_reg & all_mask) != all_mask)
1504                 return 0;
1505         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1506                 u32 tg3_ctrl;
1507
1508                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1509                         return 0;
1510
1511                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1512                             MII_TG3_CTRL_ADV_1000_FULL);
1513                 if ((tg3_ctrl & all_mask) != all_mask)
1514                         return 0;
1515         }
1516         return 1;
1517 }
1518
/* Bring up (or re-negotiate) the link on a copper 10/100/1000BASE-T
 * PHY.  Applies per-chip PHY workarounds, reads the negotiated
 * speed/duplex from the PHY over MII, programs the MAC to match, and
 * propagates carrier changes to the networking core.
 *
 * @tp: driver private state
 * @force_reset: non-zero to unconditionally reset the PHY first
 *
 * Returns 0 on success or a negative errno from the 5401 DSP init.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Disable link-change MAC interrupts while we poke the PHY. */
	tw32(MAC_EVENT, 0);

	/* Acknowledge any latched link-attention status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Turn off MI auto-polling while doing manual MII accesses. */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low, so read twice and
		 * use the second (current) value.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 rev sometimes needs a second reset +
			 * DSP reload at gigabit speed.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... (read twice: status is
	 * clear-on-read)
	 */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Enable the capacitive-coupling bit in AUX_CTRL; if it
		 * wasn't set yet, skip straight to re-negotiation.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link-up (latched-low BMSR, hence the double read). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to become non-zero so the
		 * speed/duplex decode below is meaningful.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until a plausible value appears
		 * (0x7fff looks like a failed/floating MDIO read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY
			 * matches the requested speed/duplex exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* Link is down (or needs renegotiation): restart copper
	 * autoneg and take whatever link state results.
	 */
	if (current_link_up == 0) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode/duplex to match the link. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* NOTE(review): firmware-mailbox handshake for 5700 gigabit on
	 * PCI-X / fast PCI; exact firmware semantics not visible here.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report carrier transitions to the stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1797
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  State and flag names
 * mirror the IEEE 802.3 Clause 37 management (MR_*) variables.
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;			/* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001	/* autoneg enabled */
#define MR_RESTART_AN           0x00000002	/* restart requested */
#define MR_AN_COMPLETE          0x00000004	/* negotiation finished */
#define MR_PAGE_RX              0x00000008	/* a page was received */
#define MR_NP_LOADED            0x00000010	/* next page loaded */
#define MR_TOGGLE_TX            0x00000020
/* Link-partner abilities decoded from the received config word: */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000	/* link established */

	/* Timestamps in state-machine ticks; the caller advances one
	 * tick per ~1us iteration (see fiber_autoneg()).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word from partner */
	int ability_match_count;	/* consecutive identical words seen */

	/* Boolean (0/1) match indicators fed into the state machine. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* tx/rx config code words */
#define ANEG_CFG_NP             0x00000080	/* next page */
#define ANEG_CFG_ACK            0x00000040	/* acknowledge */
#define ANEG_CFG_RF2            0x00000020	/* remote fault bit 2 */
#define ANEG_CFG_RF1            0x00000010	/* remote fault bit 1 */
#define ANEG_CFG_PS2            0x00000001	/* asymmetric pause */
#define ANEG_CFG_PS1            0x00008000	/* symmetric pause */
#define ANEG_CFG_HD             0x00004000	/* half duplex */
#define ANEG_CFG_FD             0x00002000	/* full duplex */
#define ANEG_CFG_INVAL          0x00001f06	/* bits that must be zero */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks (~10ms) to let the link settle in timed states. */
#define ANEG_STATE_SETTLE_TIME  10000
1861
1862 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1863                                    struct tg3_fiber_aneginfo *ap)
1864 {
1865         unsigned long delta;
1866         u32 rx_cfg_reg;
1867         int ret;
1868
1869         if (ap->state == ANEG_STATE_UNKNOWN) {
1870                 ap->rxconfig = 0;
1871                 ap->link_time = 0;
1872                 ap->cur_time = 0;
1873                 ap->ability_match_cfg = 0;
1874                 ap->ability_match_count = 0;
1875                 ap->ability_match = 0;
1876                 ap->idle_match = 0;
1877                 ap->ack_match = 0;
1878         }
1879         ap->cur_time++;
1880
1881         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1882                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1883
1884                 if (rx_cfg_reg != ap->ability_match_cfg) {
1885                         ap->ability_match_cfg = rx_cfg_reg;
1886                         ap->ability_match = 0;
1887                         ap->ability_match_count = 0;
1888                 } else {
1889                         if (++ap->ability_match_count > 1) {
1890                                 ap->ability_match = 1;
1891                                 ap->ability_match_cfg = rx_cfg_reg;
1892                         }
1893                 }
1894                 if (rx_cfg_reg & ANEG_CFG_ACK)
1895                         ap->ack_match = 1;
1896                 else
1897                         ap->ack_match = 0;
1898
1899                 ap->idle_match = 0;
1900         } else {
1901                 ap->idle_match = 1;
1902                 ap->ability_match_cfg = 0;
1903                 ap->ability_match_count = 0;
1904                 ap->ability_match = 0;
1905                 ap->ack_match = 0;
1906
1907                 rx_cfg_reg = 0;
1908         }
1909
1910         ap->rxconfig = rx_cfg_reg;
1911         ret = ANEG_OK;
1912
1913         switch(ap->state) {
1914         case ANEG_STATE_UNKNOWN:
1915                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1916                         ap->state = ANEG_STATE_AN_ENABLE;
1917
1918                 /* fallthru */
1919         case ANEG_STATE_AN_ENABLE:
1920                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1921                 if (ap->flags & MR_AN_ENABLE) {
1922                         ap->link_time = 0;
1923                         ap->cur_time = 0;
1924                         ap->ability_match_cfg = 0;
1925                         ap->ability_match_count = 0;
1926                         ap->ability_match = 0;
1927                         ap->idle_match = 0;
1928                         ap->ack_match = 0;
1929
1930                         ap->state = ANEG_STATE_RESTART_INIT;
1931                 } else {
1932                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1933                 }
1934                 break;
1935
1936         case ANEG_STATE_RESTART_INIT:
1937                 ap->link_time = ap->cur_time;
1938                 ap->flags &= ~(MR_NP_LOADED);
1939                 ap->txconfig = 0;
1940                 tw32(MAC_TX_AUTO_NEG, 0);
1941                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1942                 tw32_f(MAC_MODE, tp->mac_mode);
1943                 udelay(40);
1944
1945                 ret = ANEG_TIMER_ENAB;
1946                 ap->state = ANEG_STATE_RESTART;
1947
1948                 /* fallthru */
1949         case ANEG_STATE_RESTART:
1950                 delta = ap->cur_time - ap->link_time;
1951                 if (delta > ANEG_STATE_SETTLE_TIME) {
1952                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1953                 } else {
1954                         ret = ANEG_TIMER_ENAB;
1955                 }
1956                 break;
1957
1958         case ANEG_STATE_DISABLE_LINK_OK:
1959                 ret = ANEG_DONE;
1960                 break;
1961
1962         case ANEG_STATE_ABILITY_DETECT_INIT:
1963                 ap->flags &= ~(MR_TOGGLE_TX);
1964                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1965                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1966                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1967                 tw32_f(MAC_MODE, tp->mac_mode);
1968                 udelay(40);
1969
1970                 ap->state = ANEG_STATE_ABILITY_DETECT;
1971                 break;
1972
1973         case ANEG_STATE_ABILITY_DETECT:
1974                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1975                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1976                 }
1977                 break;
1978
1979         case ANEG_STATE_ACK_DETECT_INIT:
1980                 ap->txconfig |= ANEG_CFG_ACK;
1981                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1982                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1983                 tw32_f(MAC_MODE, tp->mac_mode);
1984                 udelay(40);
1985
1986                 ap->state = ANEG_STATE_ACK_DETECT;
1987
1988                 /* fallthru */
1989         case ANEG_STATE_ACK_DETECT:
1990                 if (ap->ack_match != 0) {
1991                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1992                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1993                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1994                         } else {
1995                                 ap->state = ANEG_STATE_AN_ENABLE;
1996                         }
1997                 } else if (ap->ability_match != 0 &&
1998                            ap->rxconfig == 0) {
1999                         ap->state = ANEG_STATE_AN_ENABLE;
2000                 }
2001                 break;
2002
2003         case ANEG_STATE_COMPLETE_ACK_INIT:
2004                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2005                         ret = ANEG_FAILED;
2006                         break;
2007                 }
2008                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2009                                MR_LP_ADV_HALF_DUPLEX |
2010                                MR_LP_ADV_SYM_PAUSE |
2011                                MR_LP_ADV_ASYM_PAUSE |
2012                                MR_LP_ADV_REMOTE_FAULT1 |
2013                                MR_LP_ADV_REMOTE_FAULT2 |
2014                                MR_LP_ADV_NEXT_PAGE |
2015                                MR_TOGGLE_RX |
2016                                MR_NP_RX);
2017                 if (ap->rxconfig & ANEG_CFG_FD)
2018                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2019                 if (ap->rxconfig & ANEG_CFG_HD)
2020                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2021                 if (ap->rxconfig & ANEG_CFG_PS1)
2022                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2023                 if (ap->rxconfig & ANEG_CFG_PS2)
2024                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2025                 if (ap->rxconfig & ANEG_CFG_RF1)
2026                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2027                 if (ap->rxconfig & ANEG_CFG_RF2)
2028                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2029                 if (ap->rxconfig & ANEG_CFG_NP)
2030                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2031
2032                 ap->link_time = ap->cur_time;
2033
2034                 ap->flags ^= (MR_TOGGLE_TX);
2035                 if (ap->rxconfig & 0x0008)
2036                         ap->flags |= MR_TOGGLE_RX;
2037                 if (ap->rxconfig & ANEG_CFG_NP)
2038                         ap->flags |= MR_NP_RX;
2039                 ap->flags |= MR_PAGE_RX;
2040
2041                 ap->state = ANEG_STATE_COMPLETE_ACK;
2042                 ret = ANEG_TIMER_ENAB;
2043                 break;
2044
2045         case ANEG_STATE_COMPLETE_ACK:
2046                 if (ap->ability_match != 0 &&
2047                     ap->rxconfig == 0) {
2048                         ap->state = ANEG_STATE_AN_ENABLE;
2049                         break;
2050                 }
2051                 delta = ap->cur_time - ap->link_time;
2052                 if (delta > ANEG_STATE_SETTLE_TIME) {
2053                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2054                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2055                         } else {
2056                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2057                                     !(ap->flags & MR_NP_RX)) {
2058                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2059                                 } else {
2060                                         ret = ANEG_FAILED;
2061                                 }
2062                         }
2063                 }
2064                 break;
2065
2066         case ANEG_STATE_IDLE_DETECT_INIT:
2067                 ap->link_time = ap->cur_time;
2068                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2069                 tw32_f(MAC_MODE, tp->mac_mode);
2070                 udelay(40);
2071
2072                 ap->state = ANEG_STATE_IDLE_DETECT;
2073                 ret = ANEG_TIMER_ENAB;
2074                 break;
2075
2076         case ANEG_STATE_IDLE_DETECT:
2077                 if (ap->ability_match != 0 &&
2078                     ap->rxconfig == 0) {
2079                         ap->state = ANEG_STATE_AN_ENABLE;
2080                         break;
2081                 }
2082                 delta = ap->cur_time - ap->link_time;
2083                 if (delta > ANEG_STATE_SETTLE_TIME) {
2084                         /* XXX another gem from the Broadcom driver :( */
2085                         ap->state = ANEG_STATE_LINK_OK;
2086                 }
2087                 break;
2088
2089         case ANEG_STATE_LINK_OK:
2090                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2091                 ret = ANEG_DONE;
2092                 break;
2093
2094         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2095                 /* ??? unimplemented */
2096                 break;
2097
2098         case ANEG_STATE_NEXT_PAGE_WAIT:
2099                 /* ??? unimplemented */
2100                 break;
2101
2102         default:
2103                 ret = ANEG_FAILED;
2104                 break;
2105         };
2106
2107         return ret;
2108 }
2109
2110 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2111 {
2112         int res = 0;
2113         struct tg3_fiber_aneginfo aninfo;
2114         int status = ANEG_FAILED;
2115         unsigned int tick;
2116         u32 tmp;
2117
2118         tw32_f(MAC_TX_AUTO_NEG, 0);
2119
2120         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2121         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2122         udelay(40);
2123
2124         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2125         udelay(40);
2126
2127         memset(&aninfo, 0, sizeof(aninfo));
2128         aninfo.flags |= MR_AN_ENABLE;
2129         aninfo.state = ANEG_STATE_UNKNOWN;
2130         aninfo.cur_time = 0;
2131         tick = 0;
2132         while (++tick < 195000) {
2133                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2134                 if (status == ANEG_DONE || status == ANEG_FAILED)
2135                         break;
2136
2137                 udelay(1);
2138         }
2139
2140         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2141         tw32_f(MAC_MODE, tp->mac_mode);
2142         udelay(40);
2143
2144         *flags = aninfo.flags;
2145
2146         if (status == ANEG_DONE &&
2147             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2148                              MR_LP_ADV_FULL_DUPLEX)))
2149                 res = 1;
2150
2151         return res;
2152 }
2153
/* Hand-initialize the BCM8002 SerDes PHY with a fixed sequence of
 * vendor register writes: SW reset, PMA channel config, auto-lock /
 * comdet enable, and a POR pulse.
 *
 * NOTE(review): the register numbers and values are undocumented
 * Broadcom magic; the write order and delays must not be changed.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete (~5ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize (~150ms busy-wait). */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2203
/* Fiber link setup using the hardware SG_DIG autonegotiation engine
 * (as opposed to the software state machine in fiber_autoneg()).
 *
 * @tp: driver private state
 * @mac_status: a recent MAC_STATUS sample from the caller
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 *
 * NOTE(review): the 0x01388400/0x81388400 SG_DIG_CTRL values and the
 * SG_DIG_STATUS bit positions are undocumented hardware magic.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* SERDES_CFG workaround applies to everything except 5704 A0/A1. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently on
		 * (bit 31), turn it off; link is up on PCS sync alone.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg: pulse the soft-reset bit
		 * (bit 30) around the desired control value.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Give it time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		/* Autoneg completed (bit 1) with PCS sync: decode the
		 * partner's pause bits and declare link up.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Give a freshly
			 * initted PHY one more pass; otherwise fall back
			 * to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2328
/* Fiber link setup without the SG_DIG hardware engine: either run the
 * software autonegotiation state machine (fiber_autoneg()) or force a
 * 1000FD link, then settle the MAC status.
 *
 * @tp: driver private state
 * @mac_status: a recent MAC_STATUS sample from the caller
 *
 * Returns 1 if the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate negotiated MR_* pause bits into MII
			 * advertisement form for flow-control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack latched status until SYNC/CFG changes stop
		 * re-asserting (up to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS sync without incoming config
		 * words also counts as link up.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2386
/* Bring-up / re-check path for fiber (TBI) links.  Always returns 0.
 *
 * NOTE(review): @force_reset is not examined anywhere in this
 * function; it appears to exist only for signature parity with the
 * copper path (tg3_setup_copper_phy) - confirm before relying on it.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can decide at the
	 * end whether anything changed and a report is warranted.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and the MAC reports a clean synced link (PCS_SYNCED +
	 * SIGNAL_DET, no pending config events) - just ack the
	 * change bits and leave the link alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	/* Resolve the link either via the on-chip autoneg engine or
	 * by driving negotiation from software.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the link-change bit in the status block while keeping
	 * the block marked as updated.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG change events until they stop re-asserting
	 * (bounded: 100 iterations of 5us).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* If PCS sync was lost, the link is down; under autoneg,
	 * pulse SEND_CONFIGS to restart negotiation with the peer.
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* A fiber link is always reported as 1000/full when up;
	 * drive the link LED to match.
	 */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier state; report on transition, or when the
	 * pause/speed/duplex configuration changed without one.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2500
2501 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2502 {
2503         int err;
2504
2505         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2506                 err = tg3_setup_fiber_phy(tp, force_reset);
2507         } else {
2508                 err = tg3_setup_copper_phy(tp, force_reset);
2509         }
2510
2511         if (tp->link_config.active_speed == SPEED_1000 &&
2512             tp->link_config.active_duplex == DUPLEX_HALF)
2513                 tw32(MAC_TX_LENGTHS,
2514                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2515                       (6 << TX_LENGTHS_IPG_SHIFT) |
2516                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2517         else
2518                 tw32(MAC_TX_LENGTHS,
2519                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2520                       (6 << TX_LENGTHS_IPG_SHIFT) |
2521                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2522
2523         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2524                 if (netif_carrier_ok(tp->dev)) {
2525                         tw32(HOSTCC_STAT_COAL_TICKS,
2526                              tp->coal.stats_block_coalesce_usecs);
2527                 } else {
2528                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2529                 }
2530         }
2531
2532         return err;
2533 }
2534
2535 /* Tigon3 never reports partial packet sends.  So we do not
2536  * need special logic to handle SKBs that have not had all
2537  * of their frags sent yet, like SunGEM does.
2538  */
2539 static void tg3_tx(struct tg3 *tp)
2540 {
2541         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2542         u32 sw_idx = tp->tx_cons;
2543
2544         while (sw_idx != hw_idx) {
2545                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2546                 struct sk_buff *skb = ri->skb;
2547                 int i;
2548
2549                 if (unlikely(skb == NULL))
2550                         BUG();
2551
2552                 pci_unmap_single(tp->pdev,
2553                                  pci_unmap_addr(ri, mapping),
2554                                  skb_headlen(skb),
2555                                  PCI_DMA_TODEVICE);
2556
2557                 ri->skb = NULL;
2558
2559                 sw_idx = NEXT_TX(sw_idx);
2560
2561                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2562                         if (unlikely(sw_idx == hw_idx))
2563                                 BUG();
2564
2565                         ri = &tp->tx_buffers[sw_idx];
2566                         if (unlikely(ri->skb != NULL))
2567                                 BUG();
2568
2569                         pci_unmap_page(tp->pdev,
2570                                        pci_unmap_addr(ri, mapping),
2571                                        skb_shinfo(skb)->frags[i].size,
2572                                        PCI_DMA_TODEVICE);
2573
2574                         sw_idx = NEXT_TX(sw_idx);
2575                 }
2576
2577                 dev_kfree_skb(skb);
2578         }
2579
2580         tp->tx_cons = sw_idx;
2581
2582         if (netif_queue_stopped(tp->dev) &&
2583             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2584                 netif_wake_queue(tp->dev);
2585 }
2586
2587 /* Returns size of skb allocated or < 0 on error.
2588  *
2589  * We only need to fill in the address because the other members
2590  * of the RX descriptor are invariant, see tg3_init_rings.
2591  *
2592  * Note the purposeful assymetry of cpu vs. chip accesses.  For
2593  * posting buffers we only dirty the first cache line of the RX
2594  * descriptor (containing the address).  Whereas for the RX status
2595  * buffers the cpu only reads the last cacheline of the RX descriptor
2596  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2597  */
2598 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2599                             int src_idx, u32 dest_idx_unmasked)
2600 {
2601         struct tg3_rx_buffer_desc *desc;
2602         struct ring_info *map, *src_map;
2603         struct sk_buff *skb;
2604         dma_addr_t mapping;
2605         int skb_size, dest_idx;
2606
2607         src_map = NULL;
2608         switch (opaque_key) {
2609         case RXD_OPAQUE_RING_STD:
2610                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2611                 desc = &tp->rx_std[dest_idx];
2612                 map = &tp->rx_std_buffers[dest_idx];
2613                 if (src_idx >= 0)
2614                         src_map = &tp->rx_std_buffers[src_idx];
2615                 skb_size = RX_PKT_BUF_SZ;
2616                 break;
2617
2618         case RXD_OPAQUE_RING_JUMBO:
2619                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2620                 desc = &tp->rx_jumbo[dest_idx];
2621                 map = &tp->rx_jumbo_buffers[dest_idx];
2622                 if (src_idx >= 0)
2623                         src_map = &tp->rx_jumbo_buffers[src_idx];
2624                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2625                 break;
2626
2627         default:
2628                 return -EINVAL;
2629         };
2630
2631         /* Do not overwrite any of the map or rp information
2632          * until we are sure we can commit to a new buffer.
2633          *
2634          * Callers depend upon this behavior and assume that
2635          * we leave everything unchanged if we fail.
2636          */
2637         skb = dev_alloc_skb(skb_size);
2638         if (skb == NULL)
2639                 return -ENOMEM;
2640
2641         skb->dev = tp->dev;
2642         skb_reserve(skb, tp->rx_offset);
2643
2644         mapping = pci_map_single(tp->pdev, skb->data,
2645                                  skb_size - tp->rx_offset,
2646                                  PCI_DMA_FROMDEVICE);
2647
2648         map->skb = skb;
2649         pci_unmap_addr_set(map, mapping, mapping);
2650
2651         if (src_map != NULL)
2652                 src_map->skb = NULL;
2653
2654         desc->addr_hi = ((u64)mapping >> 32);
2655         desc->addr_lo = ((u64)mapping & 0xffffffff);
2656
2657         return skb_size;
2658 }
2659
2660 /* We only need to move over in the address because the other
2661  * members of the RX descriptor are invariant.  See notes above
2662  * tg3_alloc_rx_skb for full details.
2663  */
2664 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2665                            int src_idx, u32 dest_idx_unmasked)
2666 {
2667         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2668         struct ring_info *src_map, *dest_map;
2669         int dest_idx;
2670
2671         switch (opaque_key) {
2672         case RXD_OPAQUE_RING_STD:
2673                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2674                 dest_desc = &tp->rx_std[dest_idx];
2675                 dest_map = &tp->rx_std_buffers[dest_idx];
2676                 src_desc = &tp->rx_std[src_idx];
2677                 src_map = &tp->rx_std_buffers[src_idx];
2678                 break;
2679
2680         case RXD_OPAQUE_RING_JUMBO:
2681                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2682                 dest_desc = &tp->rx_jumbo[dest_idx];
2683                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2684                 src_desc = &tp->rx_jumbo[src_idx];
2685                 src_map = &tp->rx_jumbo_buffers[src_idx];
2686                 break;
2687
2688         default:
2689                 return;
2690         };
2691
2692         dest_map->skb = src_map->skb;
2693         pci_unmap_addr_set(dest_map, mapping,
2694                            pci_unmap_addr(src_map, mapping));
2695         dest_desc->addr_hi = src_desc->addr_hi;
2696         dest_desc->addr_lo = src_desc->addr_lo;
2697
2698         src_map->skb = NULL;
2699 }
2700
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hardware-accelerated
 * VLAN receive path, using the group registered in tp->vlgrp.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2707
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Service the RX return ring: process up to @budget completed
 * packets, then ack the return ring and replenish whichever buffer
 * rings were consumed.  Returns the number of packets delivered.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which buffer ring (and
		 * which slot in it) this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Errored frame: recycle the buffer back to its ring
		 * and count the drop.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large frames: hand the original buffer up and post
		 * a freshly allocated replacement.  Small frames (or
		 * the 5701 PCI-X case): copy into a new skb and
		 * recycle the original buffer.
		 */
		if (len > RX_COPY_THRESHOLD 
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust hardware checksumming only when offload is
		 * enabled and the chip's TCP/UDP checksum field reads
		 * 0xffff for this frame.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
2878
/* NAPI poll callback: handle link/PHY events, reap TX completions,
 * receive up to *budget packets, and re-enable interrupts when all
 * work is done.  Returns 0 when finished, 1 to be polled again.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit before reprogramming
			 * the PHY under tp->lock.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
		spin_unlock(&tp->tx_lock);
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Under tagged-status signalling, latch the most recent tag
	 * (presumably consumed when interrupts are re-enabled via
	 * tg3_restart_ints() - that routine is not visible here).
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
		tp->last_tag = sblk->status_tag;
	rmb();
	sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		spin_lock(&tp->lock);
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock(&tp->lock);
	}

	return (done ? 0 : 1);
}
2938
/* Flag interrupt processing as off (the IRQ handlers check
 * tg3_irq_sync() and bail out), then wait for any handler already
 * running on another CPU to finish.  The flag is cleared elsewhere
 * (not visible in this chunk).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	/* Must not already be quiesced. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the flag visible before waiting out in-flight handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
2948
/* Nonzero while tg3_irq_quiesce() has shut interrupt processing off;
 * the IRQ handlers test this and return without touching the device.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
2953
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	/* Lock order: tp->lock first, then tp->tx_lock;
	 * tg3_full_unlock() releases in the reverse order.
	 */
	spin_lock_bh(&tp->lock);
	spin_lock(&tp->tx_lock);
}
2966
/* Release the locks taken by tg3_full_lock(), in reverse order. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock(&tp->tx_lock);
	spin_unlock_bh(&tp->lock);
}
2972
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tp->last_tag = sblk->status_tag;
	rmb();
	/* Device is being quiesced; leave the IRQ masked. */
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	else {
		/* No work, re-enable interrupts.  */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     tp->last_tag << 24);
	}
out:
	return IRQ_RETVAL(1);	/* MSI is never shared: always ours */
}
3006
/* Legacy INTx ISR for chips using non-tagged status blocks.
 * Returns IRQ_HANDLED when the event was ours, IRQ_NONE when it
 * belongs to another device sharing the line.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device is being quiesced; leave the IRQ masked. */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3049
/* INTx ISR for chips using tagged status blocks.  Identical to
 * tg3_interrupt() except that it latches sblk->status_tag into
 * tp->last_tag and, when there is no work, re-enables interrupts by
 * writing that tag (shifted into bits 31:24) back to the mailbox.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		tp->last_tag = sblk->status_tag;
		rmb();
		/* Device is being quiesced; leave the IRQ masked. */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				     tp->last_tag << 24);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3094
3095 /* ISR for interrupt test */
3096 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3097                 struct pt_regs *regs)
3098 {
3099         struct net_device *dev = dev_id;
3100         struct tg3 *tp = netdev_priv(dev);
3101         struct tg3_hw_status *sblk = tp->hw_status;
3102
3103         if (sblk->status & SD_STATUS_UPDATED) {
3104                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3105                              0x00000001);
3106                 return IRQ_RETVAL(1);
3107         }
3108         return IRQ_RETVAL(0);
3109 }
3110
3111 static int tg3_init_hw(struct tg3 *);
3112 static int tg3_halt(struct tg3 *, int, int);
3113
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service the NIC by invoking the normal interrupt
 * handler by hand (no pt_regs available in this path).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *priv = netdev_priv(dev);

	tg3_interrupt(priv->pdev->irq, dev, NULL);
}
#endif
3122
/* Work function scheduled from tg3_tx_timeout(): halt and fully
 * re-initialize the chip, then restart the queues.  Scheduled via
 * schedule_work(), so it runs outside interrupt context and may take
 * tg3_full_lock() with irq_sync == 1.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Sample and clear the restart-timer request under the lock so a
	 * concurrent setter cannot be lost between test and clear.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
}
3145
3146 static void tg3_tx_timeout(struct net_device *dev)
3147 {
3148         struct tg3 *tp = netdev_priv(dev);
3149
3150         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3151                dev->name);
3152
3153         schedule_work(&tp->reset_task);
3154 }
3155
3156 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3157
3158 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3159                                        u32 guilty_entry, int guilty_len,
3160                                        u32 last_plus_one, u32 *start, u32 mss)
3161 {
3162         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3163         dma_addr_t new_addr;
3164         u32 entry = *start;
3165         int i;
3166
3167         if (!new_skb) {
3168                 dev_kfree_skb(skb);
3169                 return -1;
3170         }
3171
3172         /* New SKB is guaranteed to be linear. */
3173         entry = *start;
3174         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3175                                   PCI_DMA_TODEVICE);
3176         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3177                     (skb->ip_summed == CHECKSUM_HW) ?
3178                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3179         *start = NEXT_TX(entry);
3180
3181         /* Now clean up the sw ring entries. */
3182         i = 0;
3183         while (entry != last_plus_one) {
3184                 int len;
3185
3186                 if (i == 0)
3187                         len = skb_headlen(skb);
3188                 else
3189                         len = skb_shinfo(skb)->frags[i-1].size;
3190                 pci_unmap_single(tp->pdev,
3191                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3192                                  len, PCI_DMA_TODEVICE);
3193                 if (i == 0) {
3194                         tp->tx_buffers[entry].skb = new_skb;
3195                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3196                 } else {
3197                         tp->tx_buffers[entry].skb = NULL;
3198                 }
3199                 entry = NEXT_TX(entry);
3200                 i++;
3201         }
3202
3203         dev_kfree_skb(skb);
3204
3205         return 0;
3206 }
3207
3208 static void tg3_set_txd(struct tg3 *tp, int entry,
3209                         dma_addr_t mapping, int len, u32 flags,
3210                         u32 mss_and_is_end)
3211 {
3212         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3213         int is_end = (mss_and_is_end & 0x1);
3214         u32 mss = (mss_and_is_end >> 1);
3215         u32 vlan_tag = 0;
3216
3217         if (is_end)
3218                 flags |= TXD_FLAG_END;
3219         if (flags & TXD_FLAG_VLAN) {
3220                 vlan_tag = flags >> 16;
3221                 flags &= 0xffff;
3222         }
3223         vlan_tag |= (mss << TXD_MSS_SHIFT);
3224
3225         txd->addr_hi = ((u64) mapping >> 32);
3226         txd->addr_lo = ((u64) mapping & 0xffffffff);
3227         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3228         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3229 }
3230
3231 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3232 {
3233         u32 base = (u32) mapping & 0xffffffff;
3234
3235         return ((base > 0xffffdcc0) &&
3236                 (base + len + 8 < base));
3237 }
3238
3239 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3240 {
3241         struct tg3 *tp = netdev_priv(dev);
3242         dma_addr_t mapping;
3243         unsigned int i;
3244         u32 len, entry, base_flags, mss;
3245         int would_hit_hwbug;
3246
3247