Automatic merge of master.kernel.org:/home/rmk/linux-2.6-rmk.git
[linux-2.6.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.26"
65 #define DRV_MODULE_RELDATE      "April 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* length of time before we decide the hardware is borked,
81  * and dev->tx_timeout() should be called to fix the problem
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 9000 : 1500)
89
90 /* These numbers seem to be hard coded in the NIC firmware somehow.
91  * You can't change the ring sizes, but you can change where you place
92  * them in the NIC onboard memory.
93  */
94 #define TG3_RX_RING_SIZE                512
95 #define TG3_DEF_RX_RING_PENDING         200
96 #define TG3_RX_JUMBO_RING_SIZE          256
97 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
98
99 /* Do not place this n-ring entries value into the tp struct itself,
100  * we really want to expose these constants to GCC so that modulo et
101  * al.  operations are done with shifts and masks instead of with
102  * hw multiply/modulo instructions.  Another solution would be to
103  * replace things like '% foo' with '& (foo - 1)'.
104  */
105 #define TG3_RX_RCB_RING_SIZE(tp)        \
106         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
107
108 #define TG3_TX_RING_SIZE                512
109 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
110
111 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
112                                  TG3_RX_RING_SIZE)
113 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
114                                  TG3_RX_JUMBO_RING_SIZE)
115 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
116                                    TG3_RX_RCB_RING_SIZE(tp))
117 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
118                                  TG3_TX_RING_SIZE)
119 #define TX_RING_GAP(TP) \
120         (TG3_TX_RING_SIZE - (TP)->tx_pending)
121 #define TX_BUFFS_AVAIL(TP)                                              \
122         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
123           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
124           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
125 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
126
127 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
128 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
129
130 /* minimum number of free TX descriptors required to wake up TX process */
131 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
132
133 /* number of ETHTOOL_GSTATS u64's */
134 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135
136 static char version[] __devinitdata =
137         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
143
144 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147
148 static struct pci_device_id tg3_pci_tbl[] = {
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
196           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
197         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
198           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
199         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
200           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
201         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
202           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
203         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
204           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
205         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
206           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
207         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
208           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
209         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
210           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
211         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
212           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
213         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
214           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
215         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
216           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
217         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
218           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
219         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
220           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
221         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
222           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
224           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
226           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
227         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
228           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
229         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
230           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
231         { 0, }
232 };
233
234 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
/* Human-readable names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.
 * The array is sized from struct tg3_ethtool_stats (see TG3_NUM_STATS);
 * presumably each entry's position matches the corresponding u64 in that
 * struct — confirm against tg3.h before reordering.
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	/* Receive-side MAC statistics */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* Transmit-side MAC statistics */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* DMA write / RX buffer-descriptor statistics */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* DMA read / TX completion statistics */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* Host coalescing / interrupt statistics */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
316
317 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318 {
319         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320                 unsigned long flags;
321
322                 spin_lock_irqsave(&tp->indirect_lock, flags);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326         } else {
327                 writel(val, tp->regs + off);
328                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329                         readl(tp->regs + off);
330         }
331 }
332
333 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334 {
335         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336                 unsigned long flags;
337
338                 spin_lock_irqsave(&tp->indirect_lock, flags);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342         } else {
343                 void __iomem *dest = tp->regs + off;
344                 writel(val, dest);
345                 readl(dest);    /* always flush PCI write */
346         }
347 }
348
349 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350 {
351         void __iomem *mbox = tp->regs + off;
352         writel(val, mbox);
353         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354                 readl(mbox);
355 }
356
357 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358 {
359         void __iomem *mbox = tp->regs + off;
360         writel(val, mbox);
361         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362                 writel(val, mbox);
363         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364                 readl(mbox);
365 }
366
367 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
368 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
369 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
370
371 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
372 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
373 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
374 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
375 #define tr32(reg)               readl(tp->regs + (reg))
376 #define tr16(reg)               readw(tp->regs + (reg))
377 #define tr8(reg)                readb(tp->regs + (reg))
378
379 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380 {
381         unsigned long flags;
382
383         spin_lock_irqsave(&tp->indirect_lock, flags);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387         /* Always leave this as zero. */
388         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389         spin_unlock_irqrestore(&tp->indirect_lock, flags);
390 }
391
392 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393 {
394         unsigned long flags;
395
396         spin_lock_irqsave(&tp->indirect_lock, flags);
397         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400         /* Always leave this as zero. */
401         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402         spin_unlock_irqrestore(&tp->indirect_lock, flags);
403 }
404
/* Mask device interrupts: set the PCI-interrupt mask bit in
 * MISC_HOST_CTRL, then write 1 to the interrupt mailbox and read it
 * back to flush the posted write.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
412
413 static inline void tg3_cond_int(struct tg3 *tp)
414 {
415         if (tp->hw_status->status & SD_STATUS_UPDATED)
416                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
417 }
418
/* Unmask device interrupts: clear the PCI-interrupt mask bit, write 0 to
 * the interrupt mailbox (read back to flush), then force an interrupt if
 * a status update was already pending while interrupts were masked.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);

	tg3_cond_int(tp);
}
428
429 static inline unsigned int tg3_has_work(struct tg3 *tp)
430 {
431         struct tg3_hw_status *sblk = tp->hw_status;
432         unsigned int work_exists = 0;
433
434         /* check for phy events */
435         if (!(tp->tg3_flags &
436               (TG3_FLAG_USE_LINKCHG_REG |
437                TG3_FLAG_POLL_SERDES))) {
438                 if (sblk->status & SD_STATUS_LINK_CHG)
439                         work_exists = 1;
440         }
441         /* check for RX/TX work to do */
442         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
443             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
444                 work_exists = 1;
445
446         return work_exists;
447 }
448
449 /* tg3_restart_ints
450  *  similar to tg3_enable_ints, but it accurately determines whether there
451  *  is new work pending and can return without flushing the PIO write
452  *  which reenables interrupts 
453  */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Re-enable interrupts without the PIO-flushing read tg3_enable_ints
	 * does; mmiowb() keeps the mailbox write ordered instead.
	 */
	tw32(TG3PCI_MISC_HOST_CTRL,
		(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	mmiowb();

	/* If work arrived while interrupts were off, kick host coalescing
	 * now so the pending work is serviced immediately.
	 */
	if (tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
465
/* Quiesce the net device: stop NAPI polling first, then disable the
 * transmit queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
471
/* Counterpart of tg3_netif_stop: restart the TX queue and polling, then
 * force an interrupt if a status update is already pending.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tg3_cond_int(tp);
}
482
/* Switch the chip's core clock back to its normal source.  On 5705+
 * parts the 625 MHz core clock is dropped; on older parts the 44 MHz
 * ALTCLK core clock is stepped down in two writes.  Each write is
 * flushed and followed by a 40us settle delay.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHZ_CORE|ALTCLK first, then ALTCLK
		 * alone, before the final write drops both bits.
		 */
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl |
		     (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		     clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
512
513 #define PHY_BUSY_LOOPS  5000
514
/* Read PHY register @reg over the MI (MDIO) interface into *val.
 * Auto-polling is temporarily disabled while the transaction runs and
 * restored afterwards.  Returns 0 on success, -EBUSY if the MI
 * interface never went idle within PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Pause hardware auto-polling so it cannot race our MI access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read opcode. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
	
	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the interface reports not-busy, then re-read the
	 * register once more after a short settle delay.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
563
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Mirrors tg3_readphy: auto-polling is paused for the transaction and
 * restored on exit.  Returns 0 on success, -EBUSY on MI timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Pause hardware auto-polling so it cannot race our MI access. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: PHY address, register, data, write
	 * opcode.
	 */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
	
	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion (busy bit clear). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
608
609 static void tg3_phy_set_wirespeed(struct tg3 *tp)
610 {
611         u32 val;
612
613         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
614                 return;
615
616         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
617             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
618                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
619                              (val | (1 << 15) | (1 << 4)));
620 }
621
622 static int tg3_bmcr_reset(struct tg3 *tp)
623 {
624         u32 phy_control;
625         int limit, err;
626
627         /* OK, reset it, and poll the BMCR_RESET bit until it
628          * clears or we time out.
629          */
630         phy_control = BMCR_RESET;
631         err = tg3_writephy(tp, MII_BMCR, phy_control);
632         if (err != 0)
633                 return -EBUSY;
634
635         limit = 5000;
636         while (limit--) {
637                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
638                 if (err != 0)
639                         return -EBUSY;
640
641                 if ((phy_control & BMCR_RESET) == 0) {
642                         udelay(40);
643                         break;
644                 }
645                 udelay(10);
646         }
647         if (limit <= 0)
648                 return -EBUSY;
649
650         return 0;
651 }
652
653 static int tg3_wait_macro_done(struct tg3 *tp)
654 {
655         int limit = 100;
656
657         while (limit--) {
658                 u32 tmp32;
659
660                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
661                         if ((tmp32 & 0x1000) == 0)
662                                 break;
663                 }
664         }
665         if (limit <= 0)
666                 return -EBUSY;
667
668         return 0;
669 }
670
/* Write a known test pattern into each of the PHY's 4 DSP channels and
 * read it back to verify the DSP is healthy.  On any macro timeout
 * *resetp is set so the caller re-runs the BMCR reset; on a data
 * mismatch a recovery sequence is written and -EBUSY returned without
 * requesting a reset.  Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test words: pairs of (low 15 bits, high 4 bits). */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write macro. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macros. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back each (low, high) word pair and compare against
		 * the pattern; only 15 low / 4 high bits are significant.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: write the DSP recovery sequence
				 * and fail without requesting a PHY reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
736
737 static int tg3_phy_reset_chanpat(struct tg3 *tp)
738 {
739         int chan;
740
741         for (chan = 0; chan < 4; chan++) {
742                 int i;
743
744                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
745                              (chan * 0x2000) | 0x0200);
746                 tg3_writephy(tp, 0x16, 0x0002);
747                 for (i = 0; i < 6; i++)
748                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
749                 tg3_writephy(tp, 0x16, 0x0202);
750                 if (tg3_wait_macro_done(tp))
751                         return -EBUSY;
752         }
753
754         return 0;
755 }
756
/* Extended PHY reset workaround for 5703/5704/5705: reset the PHY, run
 * the DSP test-pattern check (up to 10 retries, re-resetting when the
 * check asks for it), clear the test pattern, and restore normal
 * operation.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): if every loop iteration bails out via "continue" on a
 * failed tg3_readphy(MII_TG3_CTRL), phy9_orig is used uninitialized at
 * the restore write below — looks unreachable in practice but worth
 * confirming.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		/* Reset only on the first pass or when the pattern check
		 * requested another reset.
		 */
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Remove the test pattern before restoring normal operation. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master/slave setting saved before the workaround. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
832
/* Reset the tigon3 PHY unconditionally and re-apply errata
 * workarounds.  Callers decide when a reset is actually needed
 * (e.g. tg3_setup_copper_phy checks link state and its force_reset
 * argument before calling us).
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 phy_status;
        int err;

        /* BMSR latches link-down events; read it twice so the second
         * read reflects current status and proves the PHY responds.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* 5703/5704/5705 require the special reset-and-retrain
         * sequence instead of a plain BMCR reset.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* All other chips: ordinary BMCR soft reset. */
        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

out:
        /* The DSP_ADDRESS/DSP_RW_PORT writes below are vendor-supplied
         * errata workarounds; the magic values are not documented in
         * any public datasheet, so do not change them.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* Written twice on purpose (5704 A0 workaround). */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        /* Re-enable ethernet@wirespeed if configured (reset cleared it). */
        tg3_phy_set_wirespeed(tp);
        return 0;
}
910
911 static void tg3_frob_aux_power(struct tg3 *tp)
912 {
913         struct tg3 *tp_peer = tp;
914
915         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
916                 return;
917
918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
919                 tp_peer = pci_get_drvdata(tp->pdev_peer);
920                 if (!tp_peer)
921                         BUG();
922         }
923
924
925         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
926             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
927                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
928                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
929                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
930                              (GRC_LCLCTRL_GPIO_OE0 |
931                               GRC_LCLCTRL_GPIO_OE1 |
932                               GRC_LCLCTRL_GPIO_OE2 |
933                               GRC_LCLCTRL_GPIO_OUTPUT0 |
934                               GRC_LCLCTRL_GPIO_OUTPUT1));
935                         udelay(100);
936                 } else {
937                         u32 no_gpio2;
938                         u32 grc_local_ctrl;
939
940                         if (tp_peer != tp &&
941                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
942                                 return;
943
944                         /* On 5753 and variants, GPIO2 cannot be used. */
945                         no_gpio2 = tp->nic_sram_data_cfg &
946                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
947
948                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
949                                          GRC_LCLCTRL_GPIO_OE1 |
950                                          GRC_LCLCTRL_GPIO_OE2 |
951                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
952                                          GRC_LCLCTRL_GPIO_OUTPUT2;
953                         if (no_gpio2) {
954                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
955                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
956                         }
957                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
958                                                 grc_local_ctrl);
959                         udelay(100);
960
961                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
962
963                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
964                                                 grc_local_ctrl);
965                         udelay(100);
966
967                         if (!no_gpio2) {
968                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
969                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
970                                        grc_local_ctrl);
971                                 udelay(100);
972                         }
973                 }
974         } else {
975                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
976                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
977                         if (tp_peer != tp &&
978                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
979                                 return;
980
981                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
982                              (GRC_LCLCTRL_GPIO_OE1 |
983                               GRC_LCLCTRL_GPIO_OUTPUT1));
984                         udelay(100);
985
986                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
987                              (GRC_LCLCTRL_GPIO_OE1));
988                         udelay(100);
989
990                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
991                              (GRC_LCLCTRL_GPIO_OE1 |
992                               GRC_LCLCTRL_GPIO_OUTPUT1));
993                         udelay(100);
994                 }
995         }
996 }
997
998 static int tg3_setup_phy(struct tg3 *, int);
999
1000 #define RESET_KIND_SHUTDOWN     0
1001 #define RESET_KIND_INIT         1
1002 #define RESET_KIND_SUSPEND      2
1003
1004 static void tg3_write_sig_post_reset(struct tg3 *, int);
1005 static int tg3_halt_cpu(struct tg3 *, u32);
1006
/* Transition the device to PCI power state D0-D3 (state 0-3).
 *
 * For D0 the PM control register is written and we return early.
 * For D1-D3 the link is forced down to 10/half, WOL mode is armed
 * if enabled, chip clocks are slowed/gated, aux power is set up and
 * the firmware is notified.  Returns 0 or -EINVAL for a bad state.
 */
static int tg3_set_power_state(struct tg3 *tp, int state)
{
        u32 misc_host_ctrl;
        u16 power_control, power_caps;
        int pm = tp->pm_cap;    /* offset of the PCI PM capability */

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Read-modify-write PM control: clear sticky PME status
         * (write-1-to-clear) and the current power-state field.
         */
        pci_read_config_word(tp->pdev,
                             pm + PCI_PM_CTRL,
                             &power_control);
        power_control |= PCI_PM_CTRL_PME_STATUS;
        power_control &= ~(PCI_PM_CTRL_STATE_MASK);
        switch (state) {
        case 0:
                /* D0: power up immediately and return. */
                power_control |= 0;
                pci_write_config_word(tp->pdev,
                                      pm + PCI_PM_CTRL,
                                      power_control);
                udelay(100);    /* Delay after power state change */

                /* Switch out of Vaux if it is not a LOM */
                if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)) {
                        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                        udelay(100);
                }

                return 0;

        case 1:
                power_control |= 1;
                break;

        case 2:
                power_control |= 2;
                break;

        case 3:
                power_control |= 3;
                break;

        default:
                printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
                       "requested.\n",
                       tp->dev->name, state);
                return -EINVAL;
        };

        /* Low-power states: allow PME assertion from this state. */
        power_control |= PCI_PM_CTRL_PME_ENABLE;

        /* Mask PCI interrupts while we reconfigure the chip. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the current link settings so resume can restore them. */
        if (tp->link_config.phy_is_low_power == 0) {
                tp->link_config.phy_is_low_power = 1;
                tp->link_config.orig_speed = tp->link_config.speed;
                tp->link_config.orig_duplex = tp->link_config.duplex;
                tp->link_config.orig_autoneg = tp->link_config.autoneg;
        }

        /* Copper PHY: renegotiate down to 10/half to save power. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                tp->link_config.speed = SPEED_10;
                tp->link_config.duplex = DUPLEX_HALF;
                tp->link_config.autoneg = AUTONEG_ENABLE;
                tg3_setup_phy(tp, 0);
        }

        pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                /* Keep the MAC/PHY path alive for Wake-on-LAN. */
                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                        udelay(40);

                        mac_mode = MAC_MODE_PORT_MODE_MII;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
                            !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                /* Arm magic-packet detection if PME from D3cold works. */
                if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                /* Receiver stays on so wake packets can be seen. */
                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Slow down / gate the chip clocks as far as the configuration
         * allows.  The exact bit recipe is chip-family specific.
         */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_f(TG3PCI_CLOCK_CTRL, base_val |
                     CLOCK_CTRL_ALTCLK |
                     CLOCK_CTRL_PWRDOWN_PLL133);
                udelay(40);
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two steps with delays. */
                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
                udelay(40);

                tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
                udelay(40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_f(TG3PCI_CLOCK_CTRL,
                                         tp->pci_clock_ctrl | newbits3);
                        udelay(40);
                }
        }

        /* Select the auxiliary power source appropriately for WOL. */
        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                /* 0x7d00 is an undocumented register; bit recipe is a
                 * vendor-supplied 5750 AX/BX errata workaround.
                 */
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                        tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        /* Finally, set the new power state. */
        pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
        udelay(100);    /* Delay after power state change */

        /* Tell the on-chip firmware we are going down. */
        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
1188
1189 static void tg3_link_report(struct tg3 *tp)
1190 {
1191         if (!netif_carrier_ok(tp->dev)) {
1192                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1193         } else {
1194                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1195                        tp->dev->name,
1196                        (tp->link_config.active_speed == SPEED_1000 ?
1197                         1000 :
1198                         (tp->link_config.active_speed == SPEED_100 ?
1199                          100 : 10)),
1200                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1201                         "full" : "half"));
1202
1203                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1204                        "%s for RX.\n",
1205                        tp->dev->name,
1206                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1207                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1208         }
1209 }
1210
1211 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1212 {
1213         u32 new_tg3_flags = 0;
1214         u32 old_rx_mode = tp->rx_mode;
1215         u32 old_tx_mode = tp->tx_mode;
1216
1217         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1218                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1219                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1220                                 if (remote_adv & LPA_PAUSE_CAP)
1221                                         new_tg3_flags |=
1222                                                 (TG3_FLAG_RX_PAUSE |
1223                                                 TG3_FLAG_TX_PAUSE);
1224                                 else if (remote_adv & LPA_PAUSE_ASYM)
1225                                         new_tg3_flags |=
1226                                                 (TG3_FLAG_RX_PAUSE);
1227                         } else {
1228                                 if (remote_adv & LPA_PAUSE_CAP)
1229                                         new_tg3_flags |=
1230                                                 (TG3_FLAG_RX_PAUSE |
1231                                                 TG3_FLAG_TX_PAUSE);
1232                         }
1233                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1234                         if ((remote_adv & LPA_PAUSE_CAP) &&
1235                         (remote_adv & LPA_PAUSE_ASYM))
1236                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1237                 }
1238
1239                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1240                 tp->tg3_flags |= new_tg3_flags;
1241         } else {
1242                 new_tg3_flags = tp->tg3_flags;
1243         }
1244
1245         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1246                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1247         else
1248                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1249
1250         if (old_rx_mode != tp->rx_mode) {
1251                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1252         }
1253         
1254         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1255                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1256         else
1257                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1258
1259         if (old_tx_mode != tp->tx_mode) {
1260                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1261         }
1262 }
1263
1264 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1265 {
1266         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1267         case MII_TG3_AUX_STAT_10HALF:
1268                 *speed = SPEED_10;
1269                 *duplex = DUPLEX_HALF;
1270                 break;
1271
1272         case MII_TG3_AUX_STAT_10FULL:
1273                 *speed = SPEED_10;
1274                 *duplex = DUPLEX_FULL;
1275                 break;
1276
1277         case MII_TG3_AUX_STAT_100HALF:
1278                 *speed = SPEED_100;
1279                 *duplex = DUPLEX_HALF;
1280                 break;
1281
1282         case MII_TG3_AUX_STAT_100FULL:
1283                 *speed = SPEED_100;
1284                 *duplex = DUPLEX_FULL;
1285                 break;
1286
1287         case MII_TG3_AUX_STAT_1000HALF:
1288                 *speed = SPEED_1000;
1289                 *duplex = DUPLEX_HALF;
1290                 break;
1291
1292         case MII_TG3_AUX_STAT_1000FULL:
1293                 *speed = SPEED_1000;
1294                 *duplex = DUPLEX_FULL;
1295                 break;
1296
1297         default:
1298                 *speed = SPEED_INVALID;
1299                 *duplex = DUPLEX_INVALID;
1300                 break;
1301         };
1302 }
1303
/* Program the copper PHY's advertisement registers and kick off the
 * link: autonegotiation restart in the normal case, or a forced BMCR
 * write when a fixed speed/duplex has been configured.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->link_config.phy_is_low_power) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No specific speed requested: advertise everything
                 * the chip supports.
                 */
                tp->link_config.advertising =
                        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
                         ADVERTISED_Autoneg | ADVERTISED_MII);

                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                /* Translate ethtool ADVERTISED_* bits into MII
                 * ADVERTISE_* register bits.
                 */
                new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;
                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        /* 5701 A0/B0: force master mode (chip erratum). */
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        /* 5701 A0/B0: force master mode (chip erratum). */
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);

                        new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);
                }
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                /* Build the forced-mode BMCR value. */
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                };

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Drop the link via loopback and wait (up to
                         * ~15ms) for it to go down before writing the
                         * new forced mode.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                /* Double-read: BMSR latches link-down. */
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                /* Normal case: (re)start autonegotiation. */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
1442
1443 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1444 {
1445         int err;
1446
1447         /* Turn off tap power management. */
1448         /* Set Extended packet length bit */
1449         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1450
1451         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1452         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1453
1454         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1455         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1456
1457         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1458         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1459
1460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1462
1463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1465
1466         udelay(40);
1467
1468         return err;
1469 }
1470
1471 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1472 {
1473         u32 adv_reg, all_mask;
1474
1475         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1476                 return 0;
1477
1478         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1479                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1480         if ((adv_reg & all_mask) != all_mask)
1481                 return 0;
1482         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1483                 u32 tg3_ctrl;
1484
1485                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1486                         return 0;
1487
1488                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1489                             MII_TG3_CTRL_ADV_1000_FULL);
1490                 if ((tg3_ctrl & all_mask) != all_mask)
1491                         return 0;
1492         }
1493         return 1;
1494 }
1495
1496 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1497 {
1498         int current_link_up;
1499         u32 bmsr, dummy;
1500         u16 current_speed;
1501         u8 current_duplex;
1502         int i, err;
1503
1504         tw32(MAC_EVENT, 0);
1505
1506         tw32_f(MAC_STATUS,
1507              (MAC_STATUS_SYNC_CHANGED |
1508               MAC_STATUS_CFG_CHANGED |
1509               MAC_STATUS_MI_COMPLETION |
1510               MAC_STATUS_LNKSTATE_CHANGED));
1511         udelay(40);
1512
1513         tp->mi_mode = MAC_MI_MODE_BASE;
1514         tw32_f(MAC_MI_MODE, tp->mi_mode);
1515         udelay(80);
1516
1517         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1518
1519         /* Some third-party PHYs need to be reset on link going
1520          * down.
1521          */
1522         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1523              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1524              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1525             netif_carrier_ok(tp->dev)) {
1526                 tg3_readphy(tp, MII_BMSR, &bmsr);
1527                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1528                     !(bmsr & BMSR_LSTATUS))
1529                         force_reset = 1;
1530         }
1531         if (force_reset)
1532                 tg3_phy_reset(tp);
1533
1534         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1535                 tg3_readphy(tp, MII_BMSR, &bmsr);
1536                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1537                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1538                         bmsr = 0;
1539
1540                 if (!(bmsr & BMSR_LSTATUS)) {
1541                         err = tg3_init_5401phy_dsp(tp);
1542                         if (err)
1543                                 return err;
1544
1545                         tg3_readphy(tp, MII_BMSR, &bmsr);
1546                         for (i = 0; i < 1000; i++) {
1547                                 udelay(10);
1548                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1549                                     (bmsr & BMSR_LSTATUS)) {
1550                                         udelay(40);
1551                                         break;
1552                                 }
1553                         }
1554
1555                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1556                             !(bmsr & BMSR_LSTATUS) &&
1557                             tp->link_config.active_speed == SPEED_1000) {
1558                                 err = tg3_phy_reset(tp);
1559                                 if (!err)
1560                                         err = tg3_init_5401phy_dsp(tp);
1561                                 if (err)
1562                                         return err;
1563                         }
1564                 }
1565         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1566                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1567                 /* 5701 {A0,B0} CRC bug workaround */
1568                 tg3_writephy(tp, 0x15, 0x0a75);
1569                 tg3_writephy(tp, 0x1c, 0x8c68);
1570                 tg3_writephy(tp, 0x1c, 0x8d68);
1571                 tg3_writephy(tp, 0x1c, 0x8c68);
1572         }
1573
1574         /* Clear pending interrupts... */
1575         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1576         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1577
1578         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1579                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1580         else
1581                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1582
1583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1584             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1585                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1586                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1587                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1588                 else
1589                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1590         }
1591
1592         current_link_up = 0;
1593         current_speed = SPEED_INVALID;
1594         current_duplex = DUPLEX_INVALID;
1595
1596         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1597                 u32 val;
1598
1599                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1600                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1601                 if (!(val & (1 << 10))) {
1602                         val |= (1 << 10);
1603                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1604                         goto relink;
1605                 }
1606         }
1607
1608         bmsr = 0;
1609         for (i = 0; i < 100; i++) {
1610                 tg3_readphy(tp, MII_BMSR, &bmsr);
1611                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1612                     (bmsr & BMSR_LSTATUS))
1613                         break;
1614                 udelay(40);
1615         }
1616
1617         if (bmsr & BMSR_LSTATUS) {
1618                 u32 aux_stat, bmcr;
1619
1620                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1621                 for (i = 0; i < 2000; i++) {
1622                         udelay(10);
1623                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1624                             aux_stat)
1625                                 break;
1626                 }
1627
1628                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1629                                              &current_speed,
1630                                              &current_duplex);
1631
1632                 bmcr = 0;
1633                 for (i = 0; i < 200; i++) {
1634                         tg3_readphy(tp, MII_BMCR, &bmcr);
1635                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1636                                 continue;
1637                         if (bmcr && bmcr != 0x7fff)
1638                                 break;
1639                         udelay(10);
1640                 }
1641
1642                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1643                         if (bmcr & BMCR_ANENABLE) {
1644                                 current_link_up = 1;
1645
1646                                 /* Force autoneg restart if we are exiting
1647                                  * low power mode.
1648                                  */
1649                                 if (!tg3_copper_is_advertising_all(tp))
1650                                         current_link_up = 0;
1651                         } else {
1652                                 current_link_up = 0;
1653                         }
1654                 } else {
1655                         if (!(bmcr & BMCR_ANENABLE) &&
1656                             tp->link_config.speed == current_speed &&
1657                             tp->link_config.duplex == current_duplex) {
1658                                 current_link_up = 1;
1659                         } else {
1660                                 current_link_up = 0;
1661                         }
1662                 }
1663
1664                 tp->link_config.active_speed = current_speed;
1665                 tp->link_config.active_duplex = current_duplex;
1666         }
1667
1668         if (current_link_up == 1 &&
1669             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1670             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1671                 u32 local_adv, remote_adv;
1672
1673                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1674                         local_adv = 0;
1675                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1676
1677                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1678                         remote_adv = 0;
1679
1680                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1681
1682                 /* If we are not advertising full pause capability,
1683                  * something is wrong.  Bring the link down and reconfigure.
1684                  */
1685                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1686                         current_link_up = 0;
1687                 } else {
1688                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1689                 }
1690         }
1691 relink:
1692         if (current_link_up == 0) {
1693                 u32 tmp;
1694
1695                 tg3_phy_copper_begin(tp);
1696
1697                 tg3_readphy(tp, MII_BMSR, &tmp);
1698                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1699                     (tmp & BMSR_LSTATUS))
1700                         current_link_up = 1;
1701         }
1702
1703         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1704         if (current_link_up == 1) {
1705                 if (tp->link_config.active_speed == SPEED_100 ||
1706                     tp->link_config.active_speed == SPEED_10)
1707                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1708                 else
1709                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1710         } else
1711                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1712
1713         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1714         if (tp->link_config.active_duplex == DUPLEX_HALF)
1715                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1716
1717         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1719                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1720                     (current_link_up == 1 &&
1721                      tp->link_config.active_speed == SPEED_10))
1722                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1723         } else {
1724                 if (current_link_up == 1)
1725                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1726         }
1727
1728         /* ??? Without this setting Netgear GA302T PHY does not
1729          * ??? send/receive packets...
1730          */
1731         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1732             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1733                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1734                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1735                 udelay(80);
1736         }
1737
1738         tw32_f(MAC_MODE, tp->mac_mode);
1739         udelay(40);
1740
1741         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1742                 /* Polled via timer. */
1743                 tw32_f(MAC_EVENT, 0);
1744         } else {
1745                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1746         }
1747         udelay(40);
1748
1749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1750             current_link_up == 1 &&
1751             tp->link_config.active_speed == SPEED_1000 &&
1752             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1753              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1754                 udelay(120);
1755                 tw32_f(MAC_STATUS,
1756                      (MAC_STATUS_SYNC_CHANGED |
1757                       MAC_STATUS_CFG_CHANGED));
1758                 udelay(40);
1759                 tg3_write_mem(tp,
1760                               NIC_SRAM_FIRMWARE_MBOX,
1761                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1762         }
1763
1764         if (current_link_up != netif_carrier_ok(tp->dev)) {
1765                 if (current_link_up)
1766                         netif_carrier_on(tp->dev);
1767                 else
1768                         netif_carrier_off(tp->dev);
1769                 tg3_link_report(tp);
1770         }
1771
1772         return 0;
1773 }
1774
/* Software state for the IEEE 802.3 clause-37 style autonegotiation
 * state machine used on fiber (1000BASE-X) links when hardware
 * autoneg is not in use.  One instance lives on the stack of
 * fiber_autoneg() for the duration of a negotiation attempt and is
 * stepped by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        int state;              /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        /* MR_* management flags (modelled on the clause-37 MR_*
         * management variables).  MR_LP_ADV_* bits mirror the link
         * partner's abilities decoded from rxconfig.
         */
        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (cur_time increments once
         * per tg3_fiber_aneg_smachine() call).
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last config word compared */
        int ability_match_count;        /* consecutive identical words seen */

        /* Detector outputs: set when the partner's ability word has
         * stabilized, when idles are being received, and when the ACK
         * bit is set in the received config word, respectively.
         */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;         /* config words sent/received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must persist before the machine considers it settled. */
#define ANEG_STATE_SETTLE_TIME  10000
1838
1839 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1840                                    struct tg3_fiber_aneginfo *ap)
1841 {
1842         unsigned long delta;
1843         u32 rx_cfg_reg;
1844         int ret;
1845
1846         if (ap->state == ANEG_STATE_UNKNOWN) {
1847                 ap->rxconfig = 0;
1848                 ap->link_time = 0;
1849                 ap->cur_time = 0;
1850                 ap->ability_match_cfg = 0;
1851                 ap->ability_match_count = 0;
1852                 ap->ability_match = 0;
1853                 ap->idle_match = 0;
1854                 ap->ack_match = 0;
1855         }
1856         ap->cur_time++;
1857
1858         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1859                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1860
1861                 if (rx_cfg_reg != ap->ability_match_cfg) {
1862                         ap->ability_match_cfg = rx_cfg_reg;
1863                         ap->ability_match = 0;
1864                         ap->ability_match_count = 0;
1865                 } else {
1866                         if (++ap->ability_match_count > 1) {
1867                                 ap->ability_match = 1;
1868                                 ap->ability_match_cfg = rx_cfg_reg;
1869                         }
1870                 }
1871                 if (rx_cfg_reg & ANEG_CFG_ACK)
1872                         ap->ack_match = 1;
1873                 else
1874                         ap->ack_match = 0;
1875
1876                 ap->idle_match = 0;
1877         } else {
1878                 ap->idle_match = 1;
1879                 ap->ability_match_cfg = 0;
1880                 ap->ability_match_count = 0;
1881                 ap->ability_match = 0;
1882                 ap->ack_match = 0;
1883
1884                 rx_cfg_reg = 0;
1885         }
1886
1887         ap->rxconfig = rx_cfg_reg;
1888         ret = ANEG_OK;
1889
1890         switch(ap->state) {
1891         case ANEG_STATE_UNKNOWN:
1892                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1893                         ap->state = ANEG_STATE_AN_ENABLE;
1894
1895                 /* fallthru */
1896         case ANEG_STATE_AN_ENABLE:
1897                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1898                 if (ap->flags & MR_AN_ENABLE) {
1899                         ap->link_time = 0;
1900                         ap->cur_time = 0;
1901                         ap->ability_match_cfg = 0;
1902                         ap->ability_match_count = 0;
1903                         ap->ability_match = 0;
1904                         ap->idle_match = 0;
1905                         ap->ack_match = 0;
1906
1907                         ap->state = ANEG_STATE_RESTART_INIT;
1908                 } else {
1909                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1910                 }
1911                 break;
1912
1913         case ANEG_STATE_RESTART_INIT:
1914                 ap->link_time = ap->cur_time;
1915                 ap->flags &= ~(MR_NP_LOADED);
1916                 ap->txconfig = 0;
1917                 tw32(MAC_TX_AUTO_NEG, 0);
1918                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1919                 tw32_f(MAC_MODE, tp->mac_mode);
1920                 udelay(40);
1921
1922                 ret = ANEG_TIMER_ENAB;
1923                 ap->state = ANEG_STATE_RESTART;
1924
1925                 /* fallthru */
1926         case ANEG_STATE_RESTART:
1927                 delta = ap->cur_time - ap->link_time;
1928                 if (delta > ANEG_STATE_SETTLE_TIME) {
1929                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1930                 } else {
1931                         ret = ANEG_TIMER_ENAB;
1932                 }
1933                 break;
1934
1935         case ANEG_STATE_DISABLE_LINK_OK:
1936                 ret = ANEG_DONE;
1937                 break;
1938
1939         case ANEG_STATE_ABILITY_DETECT_INIT:
1940                 ap->flags &= ~(MR_TOGGLE_TX);
1941                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1942                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1943                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1944                 tw32_f(MAC_MODE, tp->mac_mode);
1945                 udelay(40);
1946
1947                 ap->state = ANEG_STATE_ABILITY_DETECT;
1948                 break;
1949
1950         case ANEG_STATE_ABILITY_DETECT:
1951                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1952                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1953                 }
1954                 break;
1955
1956         case ANEG_STATE_ACK_DETECT_INIT:
1957                 ap->txconfig |= ANEG_CFG_ACK;
1958                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1959                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1960                 tw32_f(MAC_MODE, tp->mac_mode);
1961                 udelay(40);
1962
1963                 ap->state = ANEG_STATE_ACK_DETECT;
1964
1965                 /* fallthru */
1966         case ANEG_STATE_ACK_DETECT:
1967                 if (ap->ack_match != 0) {
1968                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1969                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1970                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1971                         } else {
1972                                 ap->state = ANEG_STATE_AN_ENABLE;
1973                         }
1974                 } else if (ap->ability_match != 0 &&
1975                            ap->rxconfig == 0) {
1976                         ap->state = ANEG_STATE_AN_ENABLE;
1977                 }
1978                 break;
1979
1980         case ANEG_STATE_COMPLETE_ACK_INIT:
1981                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1982                         ret = ANEG_FAILED;
1983                         break;
1984                 }
1985                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1986                                MR_LP_ADV_HALF_DUPLEX |
1987                                MR_LP_ADV_SYM_PAUSE |
1988                                MR_LP_ADV_ASYM_PAUSE |
1989                                MR_LP_ADV_REMOTE_FAULT1 |
1990                                MR_LP_ADV_REMOTE_FAULT2 |
1991                                MR_LP_ADV_NEXT_PAGE |
1992                                MR_TOGGLE_RX |
1993                                MR_NP_RX);
1994                 if (ap->rxconfig & ANEG_CFG_FD)
1995                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1996                 if (ap->rxconfig & ANEG_CFG_HD)
1997                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1998                 if (ap->rxconfig & ANEG_CFG_PS1)
1999                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2000                 if (ap->rxconfig & ANEG_CFG_PS2)
2001                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2002                 if (ap->rxconfig & ANEG_CFG_RF1)
2003                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2004                 if (ap->rxconfig & ANEG_CFG_RF2)
2005                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2006                 if (ap->rxconfig & ANEG_CFG_NP)
2007                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2008
2009                 ap->link_time = ap->cur_time;
2010
2011                 ap->flags ^= (MR_TOGGLE_TX);
2012                 if (ap->rxconfig & 0x0008)
2013                         ap->flags |= MR_TOGGLE_RX;
2014                 if (ap->rxconfig & ANEG_CFG_NP)
2015                         ap->flags |= MR_NP_RX;
2016                 ap->flags |= MR_PAGE_RX;
2017
2018                 ap->state = ANEG_STATE_COMPLETE_ACK;
2019                 ret = ANEG_TIMER_ENAB;
2020                 break;
2021
2022         case ANEG_STATE_COMPLETE_ACK:
2023                 if (ap->ability_match != 0 &&
2024                     ap->rxconfig == 0) {
2025                         ap->state = ANEG_STATE_AN_ENABLE;
2026                         break;
2027                 }
2028                 delta = ap->cur_time - ap->link_time;
2029                 if (delta > ANEG_STATE_SETTLE_TIME) {
2030                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2031                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2032                         } else {
2033                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2034                                     !(ap->flags & MR_NP_RX)) {
2035                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2036                                 } else {
2037                                         ret = ANEG_FAILED;
2038                                 }
2039                         }
2040                 }
2041                 break;
2042
2043         case ANEG_STATE_IDLE_DETECT_INIT:
2044                 ap->link_time = ap->cur_time;
2045                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2046                 tw32_f(MAC_MODE, tp->mac_mode);
2047                 udelay(40);
2048
2049                 ap->state = ANEG_STATE_IDLE_DETECT;
2050                 ret = ANEG_TIMER_ENAB;
2051                 break;
2052
2053         case ANEG_STATE_IDLE_DETECT:
2054                 if (ap->ability_match != 0 &&
2055                     ap->rxconfig == 0) {
2056                         ap->state = ANEG_STATE_AN_ENABLE;
2057                         break;
2058                 }
2059                 delta = ap->cur_time - ap->link_time;
2060                 if (delta > ANEG_STATE_SETTLE_TIME) {
2061                         /* XXX another gem from the Broadcom driver :( */
2062                         ap->state = ANEG_STATE_LINK_OK;
2063                 }
2064                 break;
2065
2066         case ANEG_STATE_LINK_OK:
2067                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2068                 ret = ANEG_DONE;
2069                 break;
2070
2071         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2072                 /* ??? unimplemented */
2073                 break;
2074
2075         case ANEG_STATE_NEXT_PAGE_WAIT:
2076                 /* ??? unimplemented */
2077                 break;
2078
2079         default:
2080                 ret = ANEG_FAILED;
2081                 break;
2082         };
2083
2084         return ret;
2085 }
2086
2087 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2088 {
2089         int res = 0;
2090         struct tg3_fiber_aneginfo aninfo;
2091         int status = ANEG_FAILED;
2092         unsigned int tick;
2093         u32 tmp;
2094
2095         tw32_f(MAC_TX_AUTO_NEG, 0);
2096
2097         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2098         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2099         udelay(40);
2100
2101         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2102         udelay(40);
2103
2104         memset(&aninfo, 0, sizeof(aninfo));
2105         aninfo.flags |= MR_AN_ENABLE;
2106         aninfo.state = ANEG_STATE_UNKNOWN;
2107         aninfo.cur_time = 0;
2108         tick = 0;
2109         while (++tick < 195000) {
2110                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2111                 if (status == ANEG_DONE || status == ANEG_FAILED)
2112                         break;
2113
2114                 udelay(1);
2115         }
2116
2117         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2118         tw32_f(MAC_MODE, tp->mac_mode);
2119         udelay(40);
2120
2121         *flags = aninfo.flags;
2122
2123         if (status == ANEG_DONE &&
2124             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2125                              MR_LP_ADV_FULL_DUPLEX)))
2126                 res = 1;
2127
2128         return res;
2129 }
2130
/* Initialization sequence for the BCM8002 SerDes PHY.  The register
 * numbers and values are vendor-specific magic (presumably from
 * Broadcom reference code); the write ORDER is significant -- do not
 * reorder or "clean up" these writes.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2180
/* Drive the on-chip SG_DIG hardware autonegotiation block for fiber
 * (1000BASE-X) links.  Handles both forced-link and autoneg modes,
 * including a parallel-detection fallback when the partner does not
 * autonegotiate.  Returns nonzero when the link should be considered
 * up.
 *
 * NOTE(review): the MAC_SERDES_CFG values (0xc010000 / 0x4010000 /
 * 0xc011000) are vendor magic selecting per-port serdes settings --
 * applied as a workaround on all chips except 5704 A0/A1.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced link: if hw autoneg (bit 31) is still on,
                 * turn it off and restore the forced-mode control value.
                 */
                if (sg_dig_ctrl & (1 << 31)) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }
                        tw32_f(SG_DIG_CTRL, 0x01388400);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = 0x81388400;

        /* Pause capability */
        expected_sg_dig_ctrl |= (1 << 11);

        /* Asymmetric pause */
        expected_sg_dig_ctrl |= (1 << 12);

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* (Re)start hardware autoneg: pulse the restart bit
                 * (bit 30) around the desired control value.
                 */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                int i;

                /* Give time to negotiate (~200ms) */
                for (i = 0; i < 40000; i++) {
                        sg_dig_status = tr32(SG_DIG_STATUS);
                        if (sg_dig_status & (0x3))
                                break;
                        udelay(5);
                }
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & (1 << 1)) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg completed: decode the partner's
                         * pause bits and program flow control.
                         */
                        u32 local_adv, remote_adv;

                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (sg_dig_status & (1 << 19))
                                remote_adv |= LPA_PAUSE_CAP;
                        if (sg_dig_status & (1 << 20))
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
                } else if (!(sg_dig_status & (1 << 1))) {
                        /* Autoneg did not complete.  Skip the fallback
                         * once right after (re)init, otherwise fall
                         * back to parallel detection.
                         */
                        if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
                                tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, 0x01388400);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                }
                        }
                }
        }

out:
        return current_link_up;
}
2305
/* Bring up a fiber link without the SG_DIG hardware autoneg block:
 * run the software clause-37 state machine (fiber_autoneg()) when
 * autoneg is enabled, otherwise force a 1000FD link.  Returns
 * nonzero when the link is up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* No PCS sync means no usable signal at all. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
                tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
                goto out;
        }

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 flags;
                int i;

                if (fiber_autoneg(tp, &flags)) {
                        /* Negotiation succeeded: decode the partner's
                         * pause flags and program flow control.
                         */
                        u32 local_adv, remote_adv;

                        local_adv = ADVERTISE_PAUSE_CAP;
                        remote_adv = 0;
                        if (flags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_PAUSE_CAP;
                        if (flags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_PAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
                        current_link_up = 1;
                }
                /* Drain any pending sync/config change indications by
                 * writing them back until the status bits stay clear.
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed, but we still have PCS sync and no
                 * incoming config words: treat as link up (parallel
                 * detection of a non-negotiating partner).
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                /* Forcing 1000FD link up. */
                current_link_up = 1;
                tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);
        }

out:
        return current_link_up;
}
2363
/* Top-level link setup for fiber boards: program the MAC for TBI mode,
 * run either hardware or by-hand autonegotiation, then update the link
 * state, LEDs, and carrier.  Reports link changes via tg3_link_report().
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-setup link parameters so we can report a
	 * change even when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: if not using HW autoneg and the link is already up
	 * with nothing but sync + signal-detect asserted, just ack the
	 * change bits and leave the configuration alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the pending link-change bit in the host status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config-changed until the chip stops re-asserting
	 * them (bounded at 100 iterations of 5us).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* Lost sync: declare link down; when autonegotiating, pulse
	 * SEND_CONFIGS to restart negotiation with the partner.
	 */
	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000/full when up; drive the link LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Update carrier state; report when either the carrier or any
	 * of the pause/speed/duplex parameters changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2477
/* Run link setup for whichever PHY type this board has (fiber/SerDes or
 * copper), then reprogram MAC TX timing and statistics coalescing to
 * match the resulting link state.  Returns the result of the
 * PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000Mbps half duplex needs a much larger slot time (0xff)
	 * than all other speed/duplex combinations (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* On pre-5705 chips, only collect statistics while the link is
	 * up (a tick value of 0 disables stat coalescing).
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     DEFAULT_STAT_COAL_TICKS);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2511
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim completed TX ring entries: walk from the software consumer
 * index up to the hardware consumer index, unmapping the DMA buffers
 * and freeing each skb, then wake the queue if enough room opened up.
 * Caller must hold the TX lock (see tg3_poll).
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		/* Only the first descriptor of a packet carries the skb
		 * pointer; a NULL here means the ring is corrupted.
		 */
		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each page fragment occupies its own descriptor; unmap
		 * them all before freeing the skb.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			if (unlikely(sw_idx == hw_idx))
				BUG();

			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL))
				BUG();

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb_irq(skb);
	}

	tp->tx_cons = sw_idx;

	/* Restart a stopped queue once enough descriptors are free. */
	if (netif_queue_stopped(tp->dev) &&
	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
		netif_wake_queue(tp->dev);
}
2563
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the standard or jumbo ring; @src_idx, when >= 0,
 * names a source slot whose skb pointer is cleared after the new
 * buffer is committed; @dest_idx_unmasked is reduced modulo the ring
 * size to pick the destination slot.
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = RX_PKT_BUF_SZ;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	/* Reserve rx_offset bytes so the IP header lands aligned. */
	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the 64-bit DMA address is written; the rest of the
	 * descriptor is invariant (see comment above).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
2636
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Re-post the buffer at @src_idx into slot @dest_idx_unmasked (modulo
 * the ring size) of the ring selected by @opaque_key, transferring the
 * skb, the DMA mapping, and the descriptor address.
 */
static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tp->rx_std[dest_idx];
		dest_map = &tp->rx_std_buffers[dest_idx];
		src_desc = &tp->rx_std[src_idx];
		src_map = &tp->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tp->rx_jumbo[dest_idx];
		dest_map = &tp->rx_jumbo_buffers[dest_idx];
		src_desc = &tp->rx_jumbo[src_idx];
		src_map = &tp->rx_jumbo_buffers[src_idx];
		break;

	default:
		/* Unknown ring key: nothing to recycle. */
		return;
	};

	/* Ownership of the skb and its mapping moves to the destination
	 * slot; the source slot is emptied last.
	 */
	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
2677
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN acceleration path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2684
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * Process up to @budget received packets from the status ring and pass
 * them to the stack; returns the number of packets received.  Refills
 * whichever post ring(s) supplied buffers along the way.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which post ring (std or
		 * jumbo) and which slot this buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unrecognized cookie: skip without posting. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Errored frame (other than the odd-nibble MII case):
		 * recycle the buffer and count it as dropped.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large packets: hand the existing buffer up and post a
		 * fresh one.  Small packets are copied instead so the
		 * original buffer can be recycled cheaply.
		 */
		if (len > RX_COPY_THRESHOLD 
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* Reserve 2 bytes to align the IP header. */
			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust hardware checksum only when enabled and the chip
		 * reports a complete (0xffff) TCP/UDP checksum.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure the mailbox writes are visible before returning. */
	mmiowb();

	return received;
}
2855
/* NAPI poll callback (dev->poll): handles link-change events and TX
 * completions under tp->lock, then runs RX processing within the NAPI
 * budget.  Returns 0 (and re-enables interrupts) when all work is done,
 * 1 when the stack should poll again.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	int done;

	spin_lock_irqsave(&tp->lock, flags);

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_UPDATED) {
			/* Clear the link-change bit before acting on it. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			tg3_setup_phy(tp, 0);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
		spin_unlock(&tp->tx_lock);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	done = 1;
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		/* Charge the work done against both budget and quota. */
		*budget -= work_done;
		netdev->quota -= work_done;

		/* Budget exhausted: more RX work may remain. */
		if (work_done >= orig_budget)
			done = 0;
	}

	/* if no more work, tell net stack and NIC we're done */
	if (done) {
		spin_lock_irqsave(&tp->lock, flags);
		__netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return (done ? 0 : 1);
}
2916
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 *
 * Always reports the interrupt as handled (MSI is never shared).
 */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	sblk->status &= ~SD_STATUS_UPDATED;

	if (likely(tg3_has_work(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	else {
		/* no work, re-enable interrupts by writing zero
		 * to the interrupt mailbox.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000000);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(1);
}
2953
/* Legacy INTx interrupt handler.  Determines whether the interrupt is
 * ours (the line may be shared), acks it, and schedules the NAPI poll
 * when there is work.  Returns IRQ_HANDLED when the interrupt was ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/*
		 * Flush PCI write.  This also guarantees that our
		 * status block has been flushed to host memory.
		 */
		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(tg3_has_work(tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
3005
3006 /* ISR for interrupt test */
3007 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3008                 struct pt_regs *regs)
3009 {
3010         struct net_device *dev = dev_id;
3011         struct tg3 *tp = netdev_priv(dev);
3012         struct tg3_hw_status *sblk = tp->hw_status;
3013
3014         if (sblk->status & SD_STATUS_UPDATED) {
3015                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3016                              0x00000001);
3017                 return IRQ_RETVAL(1);
3018         }
3019         return IRQ_RETVAL(0);
3020 }
3021
3022 static int tg3_init_hw(struct tg3 *);
3023 static int tg3_halt(struct tg3 *);
3024
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the INTx interrupt handler directly on
 * behalf of the netpoll/netconsole machinery.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3033
/* Workqueue handler (scheduled from tg3_tx_timeout) that performs a
 * full halt and re-init of the chip under the driver locks, then
 * optionally rearms the driver timer.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Sample whether a timer restart was requested, and clear the
	 * request before resetting.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
}
3058
3059 static void tg3_tx_timeout(struct net_device *dev)
3060 {
3061         struct tg3 *tp = netdev_priv(dev);
3062
3063         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3064                dev->name);
3065
3066         schedule_work(&tp->reset_task);
3067 }
3068
3069 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3070
/* Work around the 4GB-boundary DMA hardware bug: replace a problematic
 * (possibly fragmented) skb with a linear copy mapped at a new address,
 * rewrite the first affected TX descriptor, and release the old skb's
 * mappings from *start up to last_plus_one.  *start is advanced past
 * the rewritten entry.  Returns 0 on success, -1 (skb dropped) if the
 * copy allocation fails.  guilty_entry/guilty_len are currently unused.
 */
static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 guilty_entry, int guilty_len,
				       u32 last_plus_one, u32 *start, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr;
	u32 entry = *start;
	int i;

	if (!new_skb) {
		dev_kfree_skb(skb);
		return -1;
	}

	/* New SKB is guaranteed to be linear. */
	entry = *start;
	new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
				  PCI_DMA_TODEVICE);
	/* One descriptor now covers the whole (linear) packet. */
	tg3_set_txd(tp, entry, new_addr, new_skb->len,
		    (skb->ip_summed == CHECKSUM_HW) ?
		    TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
	*start = NEXT_TX(entry);

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the skb head; later entries held frags. */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the new skb and mapping. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return 0;
}
3120
3121 static void tg3_set_txd(struct tg3 *tp, int entry,
3122                         dma_addr_t mapping, int len, u32 flags,
3123                         u32 mss_and_is_end)
3124 {
3125         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3126         int is_end = (mss_and_is_end & 0x1);
3127         u32 mss = (mss_and_is_end >> 1);
3128         u32 vlan_tag = 0;
3129
3130         if (is_end)
3131                 flags |= TXD_FLAG_END;
3132         if (flags & TXD_FLAG_VLAN) {
3133                 vlan_tag = flags >> 16;
3134                 flags &= 0xffff;
3135         }
3136         vlan_tag |= (mss << TXD_MSS_SHIFT);
3137
3138         txd->addr_hi = ((u64) mapping >> 32);
3139         txd->addr_lo = ((u64) mapping & 0xffffffff);
3140         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3141         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3142 }
3143
3144 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3145 {
3146         u32 base = (u32) mapping & 0xffffffff;
3147
3148         return ((base > 0xffffdcc0) &&
3149                 (base + len + 8 < base));
3150 }
3151
3152 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3153 {
3154         struct tg3 *tp = netdev_priv(dev);
3155         dma_addr_t mapping;
3156         unsigned int i;
3157         u32 len, entry, base_flags, mss;
3158         int would_hit_hwbug;
3159         unsigned long flags;
3160
3161         len = skb_headlen(skb);
3162
3163         /* No BH disabling for tx_lock here.  We are running in BH disabled
3164          * context and TX reclaim runs via tp->poll inside of a software
3165          * interrupt.  Rejoice!
3166          *
3167          * Actually, things are not so simple.  If we are to take a hw
3168          * IRQ here, we can deadlock, consider:
3169          *
3170          *       CPU1           CPU2
3171          *   tg3_start_xmit
3172          *   take tp->tx_lock
3173          *                      tg3_timer
3174          *                      take tp->lock
3175          *   tg3_interrupt
3176          *   spin on tp->lock
3177          *                      spin on tp->tx_lock
3178          *
3179          * So we really do need to disable interrupts when taking
3180          * tx_lock here.
3181          */
3182         local_irq_save(flags);
3183         if (!spin_trylock(&tp->tx_lock)) { 
3184                 local_irq_restore(flags);
3185                 return NETDEV_TX_LOCKED; 
3186         } 
3187
3188         /* This is a hard error, log it. */
3189         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3190                 netif_stop_queue(dev);
3191                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3192                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3193                        dev->name);
3194                 return NETDEV_TX_BUSY;
3195         }
3196
3197         entry = tp->tx_prod;
3198         base_flags = 0;
3199         if (skb->ip_summed == CHECKSUM_HW)
3200                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3201 #if TG3_TSO_SUPPORT != 0
3202         mss = 0;
3203         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3204             (mss = skb_shinfo(skb)->tso_size) != 0) {
3205                 int tcp_opt_len, ip_tcp_len;
3206
3207                 if (skb_header_cloned(skb) &&
3208                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3209                         dev_kfree_skb(skb);
3210                         goto out_unlock;
3211                 }
3212
3213                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3214                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3215
3216                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3217                                TXD_FLAG_CPU_POST_DMA);
3218
3219                 skb->nh.iph->check = 0;
3220                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3221                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3222                         skb->h.th->check = 0;
3223                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3224                 }
3225                 else {
3226                         skb->h.th->check =
3227                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3228                                                    skb->nh.iph->daddr,
3229                                                    0, IPPROTO_TCP, 0);
3230                 }
3231
3232                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3233                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3234                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3235                                 int tsflags;
3236
3237                                 tsflags = ((skb->nh.iph->ihl - 5) +
3238                                            (tcp_opt_len >> 2));
3239                                 mss |= (tsflags << 11);
3240                         }
3241                 } else {
3242                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3243                                 int tsflags;
3244
3245                                 tsflags = ((skb->nh.iph->ihl - 5) +
3246                                            (tcp_opt_len >> 2));
3247                                 base_flags |= tsflags << 12;
3248                         }
3249                 }