tg3: fix 64 bit counter for ethtool stats
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT 1

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.93"
#define DRV_MODULE_RELDATE      "May 22, 2008"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
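
/* For example, since TG3_TX_RING_SIZE is a power of two (512),
 * NEXT_TX(511) == (511 + 1) & 511 == 0: the index wraps around with a
 * single AND instead of a hardware modulo.
 */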

#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
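/* struct tg3_ethtool_stats is laid out as nothing but u64 counters, so
 * this sizeof arithmetic yields the number of counters exported via
 * ethtool -S and tracks the struct layout automatically.
 */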

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
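
/* Note: the order of the strings above must match, entry for entry,
 * the layout of struct tg3_ethtool_stats, because the counters are
 * copied out to user space as one flat array of u64s.
 */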

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
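
/* A typical caller goes through the tw32_wait_f() macro below, e.g.
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 *
 * which guarantees that at least 100 usec elapse after the write,
 * whether the posted or the non-posted path was taken.
 */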

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
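
/* tw32()/tr32() and the mailbox variants dispatch through function
 * pointers in struct tg3, so the access method (direct MMIO, flushed
 * MMIO, or indirect config-space cycles) is selected once at probe
 * time based on chip bugs and bus ordering requirements, instead of
 * being re-tested on every register access.
 */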

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
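
/* Callers bracket accesses to resources shared with the APE firmware,
 * for example:
 *
 *      if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *              ... touch the shared resource ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 *
 * so that the driver and the APE never use the resource concurrently.
 */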

static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tp->tx_cons ||
            sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
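
/* 5000 polls of udelay(10) bound a single MII transaction at roughly
 * 50 ms before tg3_readphy()/tg3_writephy() give up with -EBUSY.
 */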

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit <= 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = (struct tg3 *)bp->priv;
        u32 val;

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
                return -EAGAIN;

        if (tg3_readphy(tp, reg, &val))
                return -EIO;

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = (struct tg3 *)bp->priv;

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
                return -EAGAIN;

        if (tg3_writephy(tp, reg, val))
                return -EIO;

        return 0;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config(struct tg3 *tp)
{
        u32 val;

        if (tp->mdio_bus.phy_map[PHY_ADDR]->interface !=
            PHY_INTERFACE_MODE_RGMII)
                return;

        val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
                                    MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

        val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_INBAND_ENABLE;
        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus.mdio_lock);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus.mdio_lock);
        }

        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
                tg3_mdio_config(tp);
}

static void tg3_mdio_stop(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                mutex_lock(&tp->mdio_bus.mdio_lock);
                tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
                mutex_unlock(&tp->mdio_bus.mdio_lock);
        }
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;
        struct mii_bus *mdio_bus = &tp->mdio_bus;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        memset(mdio_bus, 0, sizeof(*mdio_bus));

        mdio_bus->name     = "tg3 mdio bus";
        snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        mdio_bus->priv     = tp;
        mdio_bus->dev      = &tp->pdev->dev;
        mdio_bus->read     = &tg3_mdio_read;
        mdio_bus->write    = &tg3_mdio_write;
        mdio_bus->reset    = &tg3_mdio_reset;
        mdio_bus->phy_mask = ~(1 << PHY_ADDR);
        mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                return i;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        phydev = tp->mdio_bus.phy_map[PHY_ADDR];

        switch (phydev->phy_id) {
        case TG3_PHY_ID_BCM50610:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                break;
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                break;
        }

        tg3_mdio_config(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(&tp->mdio_bus);
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
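
/* For example, with the full 2500 usec window remaining, delay_cnt is
 * (2500 >> 3) + 1 = 313, i.e. the event bit is polled about every
 * 8 usec until the firmware acks or the window expires.
 */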

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

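        /* Length is 14 bytes: the seven 16-bit MII registers (BMCR,
         * BMSR, ADVERTISE, LPA, CTRL1000, STAT1000, PHYADDR) packed
         * into the four data mailbox words written below.
         */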
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}
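
/* The mapping above, restated as a truth table:
 *
 *      TX  RX      advertised
 *      --  --      ------------------------------------------
 *      1   1       ADVERTISE_PAUSE_CAP
 *      1   0       ADVERTISE_PAUSE_ASYM
 *      0   1       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *      0   0       nothing
 *
 * tg3_advert_flowctrl_1000X() below is the same mapping expressed in
 * 1000BASE-X advertisement bits.
 */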

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & TG3_FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & TG3_FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_PAUSE_CAP) {
                if (lcladv & ADVERTISE_PAUSE_ASYM) {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_PAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_PAUSE_CAP)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
                if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = TG3_FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = TG3_FLOW_CTRL_TX;
        }

        return cap;
}
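
/* Both resolvers implement the standard IEEE 802.3 pause resolution
 * (Annex 28B): a direction is enabled only when both link partners
 * advertise a compatible combination.  For example,
 *
 *      tg3_resolve_flowctrl_1000T(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP)
 *
 * returns TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX.
 */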
1214
1215 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1216 {
1217         u8 autoneg;
1218         u8 flowctrl = 0;
1219         u32 old_rx_mode = tp->rx_mode;
1220         u32 old_tx_mode = tp->tx_mode;
1221
1222         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1223                 autoneg = tp->mdio_bus.phy_map[PHY_ADDR]->autoneg;
1224         else
1225                 autoneg = tp->link_config.autoneg;
1226
1227         if (autoneg == AUTONEG_ENABLE &&
1228             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1229                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1230                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1231                 else
1232                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1233         } else
1234                 flowctrl = tp->link_config.flowctrl;
1235
1236         tp->link_config.active_flowctrl = flowctrl;
1237
1238         if (flowctrl & TG3_FLOW_CTRL_RX)
1239                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1240         else
1241                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1242
1243         if (old_rx_mode != tp->rx_mode)
1244                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1245
1246         if (flowctrl & TG3_FLOW_CTRL_TX)
1247                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1248         else
1249                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1250
1251         if (old_tx_mode != tp->tx_mode)
1252                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1253 }
1254
1255 static void tg3_adjust_link(struct net_device *dev)
1256 {
1257         u8 oldflowctrl, linkmesg = 0;
1258         u32 mac_mode, lcl_adv, rmt_adv;
1259         struct tg3 *tp = netdev_priv(dev);
1260         struct phy_device *phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1261
1262         spin_lock(&tp->lock);
1263
1264         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1265                                     MAC_MODE_HALF_DUPLEX);
1266
1267         oldflowctrl = tp->link_config.active_flowctrl;
1268
1269         if (phydev->link) {
1270                 lcl_adv = 0;
1271                 rmt_adv = 0;
1272
1273                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1274                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1275                 else
1276                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1277
1278                 if (phydev->duplex == DUPLEX_HALF)
1279                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1280                 else {
1281                         lcl_adv = tg3_advert_flowctrl_1000T(
1282                                   tp->link_config.flowctrl);
1283
1284                         if (phydev->pause)
1285                                 rmt_adv = LPA_PAUSE_CAP;
1286                         if (phydev->asym_pause)
1287                                 rmt_adv |= LPA_PAUSE_ASYM;
1288                 }
1289
1290                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1291         } else
1292                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1293
1294         if (mac_mode != tp->mac_mode) {
1295                 tp->mac_mode = mac_mode;
1296                 tw32_f(MAC_MODE, tp->mac_mode);
1297                 udelay(40);
1298         }
1299
1300         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1301                 tw32(MAC_TX_LENGTHS,
1302                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1303                       (6 << TX_LENGTHS_IPG_SHIFT) |
1304                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1305         else
1306                 tw32(MAC_TX_LENGTHS,
1307                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1308                       (6 << TX_LENGTHS_IPG_SHIFT) |
1309                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1310
1311         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1312             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1313             phydev->speed != tp->link_config.active_speed ||
1314             phydev->duplex != tp->link_config.active_duplex ||
1315             oldflowctrl != tp->link_config.active_flowctrl)
1316             linkmesg = 1;
1317
1318         tp->link_config.active_speed = phydev->speed;
1319         tp->link_config.active_duplex = phydev->duplex;
1320
1321         spin_unlock(&tp->lock);
1322
1323         if (linkmesg)
1324                 tg3_link_report(tp);
1325 }
1326
1327 static int tg3_phy_init(struct tg3 *tp)
1328 {
1329         struct phy_device *phydev;
1330
1331         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1332                 return 0;
1333
1334         /* Bring the PHY back to a known state. */
1335         tg3_bmcr_reset(tp);
1336
1337         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1338
1339         /* Attach the MAC to the PHY. */
1340         phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1341                              phydev->dev_flags, phydev->interface);
1342         if (IS_ERR(phydev)) {
1343                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1344                 return PTR_ERR(phydev);
1345         }
1346
1347         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1348
1349         /* Mask with MAC supported features. */
1350         phydev->supported &= (PHY_GBIT_FEATURES |
1351                               SUPPORTED_Pause |
1352                               SUPPORTED_Asym_Pause);
1353
1354         phydev->advertising = phydev->supported;
1355
1356         printk(KERN_INFO
1357                "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1358                tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1359
1360         return 0;
1361 }
1362
1363 static void tg3_phy_start(struct tg3 *tp)
1364 {
1365         struct phy_device *phydev;
1366
1367         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1368                 return;
1369
1370         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
1371
1372         if (tp->link_config.phy_is_low_power) {
1373                 tp->link_config.phy_is_low_power = 0;
1374                 phydev->speed = tp->link_config.orig_speed;
1375                 phydev->duplex = tp->link_config.orig_duplex;
1376                 phydev->autoneg = tp->link_config.orig_autoneg;
1377                 phydev->advertising = tp->link_config.orig_advertising;
1378         }
1379
1380         phy_start(phydev);
1381
1382         phy_start_aneg(phydev);
1383 }
1384
1385 static void tg3_phy_stop(struct tg3 *tp)
1386 {
1387         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1388                 return;
1389
1390         phy_stop(tp->mdio_bus.phy_map[PHY_ADDR]);
1391 }
1392
1393 static void tg3_phy_fini(struct tg3 *tp)
1394 {
1395         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1396                 phy_disconnect(tp->mdio_bus.phy_map[PHY_ADDR]);
1397                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1398         }
1399 }
1400
1401 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1402 {
1403         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1404         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1405 }
1406
1407 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1408 {
1409         u32 phy;
1410
1411         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1412             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1413                 return;
1414
1415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1416                 u32 ephy;
1417
1418                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1419                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1420                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1421                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1422                                 if (enable)
1423                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1424                                 else
1425                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1426                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1427                         }
1428                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1429                 }
1430         } else {
1431                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1432                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1433                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1434                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1435                         if (enable)
1436                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1437                         else
1438                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1439                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1440                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1441                 }
1442         }
1443 }
1444
1445 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1446 {
1447         u32 val;
1448
1449         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1450                 return;
1451
1452         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1453             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1454                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1455                              (val | (1 << 15) | (1 << 4)));
1456 }
1457
1458 static void tg3_phy_apply_otp(struct tg3 *tp)
1459 {
1460         u32 otp, phy;
1461
1462         if (!tp->phy_otp)
1463                 return;
1464
1465         otp = tp->phy_otp;
1466
1467         /* Enable SM_DSP clock and tx 6dB coding. */
1468         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1469               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1470               MII_TG3_AUXCTL_ACTL_TX_6DB;
1471         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1472
1473         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1474         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1475         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1476
1477         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1478               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1479         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1480
1481         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1482         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1483         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1484
1485         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1486         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1487
1488         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1489         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1490
1491         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1492               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1493         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1494
1495         /* Turn off SM_DSP clock. */
1496         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1497               MII_TG3_AUXCTL_ACTL_TX_6DB;
1498         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1499 }
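/* tp->phy_otp appears to hold one-time-programmable analog trim data;
 * the masks and shifts above unpack that 32-bit word into the
 * individual DSP taps (AGC target, HPF, ADC clock adjust, VDAC, 10BT
 * amplitude and resistive offsets).  The SM_DSP clock has to be
 * running while the taps are written, hence the enable/disable
 * bracketing.
 */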
1500
1501 static int tg3_wait_macro_done(struct tg3 *tp)
1502 {
1503         int limit = 100;
1504
1505         while (limit--) {
1506                 u32 tmp32;
1507
1508                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1509                         if ((tmp32 & 0x1000) == 0)
1510                                 break;
1511                 }
1512         }
1513         if (limit < 0)
1514                 return -EBUSY;
1515
1516         return 0;
1517 }
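/* Register 0x16 appears to act as the DSP macro control/status word on
 * these PHYs: the callers below kick macros by writing it, and bit 12
 * (0x1000) reads back as busy while a sequence is still running.  With
 * the post-decrement in "while (limit--)", limit only goes negative
 * once all 100 polls have been exhausted, which is what the -EBUSY
 * check tests for.
 */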
1518
1519 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1520 {
1521         static const u32 test_pat[4][6] = {
1522         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1523         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1524         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1525         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1526         };
1527         int chan;
1528
1529         for (chan = 0; chan < 4; chan++) {
1530                 int i;
1531
1532                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1533                              (chan * 0x2000) | 0x0200);
1534                 tg3_writephy(tp, 0x16, 0x0002);
1535
1536                 for (i = 0; i < 6; i++)
1537                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1538                                      test_pat[chan][i]);
1539
1540                 tg3_writephy(tp, 0x16, 0x0202);
1541                 if (tg3_wait_macro_done(tp)) {
1542                         *resetp = 1;
1543                         return -EBUSY;
1544                 }
1545
1546                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1547                              (chan * 0x2000) | 0x0200);
1548                 tg3_writephy(tp, 0x16, 0x0082);
1549                 if (tg3_wait_macro_done(tp)) {
1550                         *resetp = 1;
1551                         return -EBUSY;
1552                 }
1553
1554                 tg3_writephy(tp, 0x16, 0x0802);
1555                 if (tg3_wait_macro_done(tp)) {
1556                         *resetp = 1;
1557                         return -EBUSY;
1558                 }
1559
1560                 for (i = 0; i < 6; i += 2) {
1561                         u32 low, high;
1562
1563                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1564                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1565                             tg3_wait_macro_done(tp)) {
1566                                 *resetp = 1;
1567                                 return -EBUSY;
1568                         }
1569                         low &= 0x7fff;
1570                         high &= 0x000f;
1571                         if (low != test_pat[chan][i] ||
1572                             high != test_pat[chan][i+1]) {
1573                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1574                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1575                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1576
1577                                 return -EBUSY;
1578                         }
1579                 }
1580         }
1581
1582         return 0;
1583 }
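/* The check above walks all four DSP channels (spaced 0x2000 apart in
 * the DSP address map): write a six-word test pattern, kick the macro
 * through register 0x16, read the pattern back masked to the bits that
 * matter, and ask for another PHY reset on any mismatch or macro
 * timeout.
 */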
1584
1585 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1586 {
1587         int chan;
1588
1589         for (chan = 0; chan < 4; chan++) {
1590                 int i;
1591
1592                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1593                              (chan * 0x2000) | 0x0200);
1594                 tg3_writephy(tp, 0x16, 0x0002);
1595                 for (i = 0; i < 6; i++)
1596                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1597                 tg3_writephy(tp, 0x16, 0x0202);
1598                 if (tg3_wait_macro_done(tp))
1599                         return -EBUSY;
1600         }
1601
1602         return 0;
1603 }
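/* Companion to the pattern test above: rewrite the same six words of
 * every channel with zeroes so no test pattern is left behind in the
 * DSP after the check.
 */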
1604
1605 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1606 {
1607         u32 reg32, phy9_orig;
1608         int retries, do_phy_reset, err;
1609
1610         retries = 10;
1611         do_phy_reset = 1;
1612         do {
1613                 if (do_phy_reset) {
1614                         err = tg3_bmcr_reset(tp);
1615                         if (err)
1616                                 return err;
1617                         do_phy_reset = 0;
1618                 }
1619
1620                 /* Disable transmitter and interrupt.  */
1621                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1622                         continue;
1623
1624                 reg32 |= 0x3000;
1625                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1626
1627                 /* Set full-duplex, 1000 Mbps.  */
1628                 tg3_writephy(tp, MII_BMCR,
1629                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1630
1631                 /* Set to master mode.  */
1632                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1633                         continue;
1634
1635                 tg3_writephy(tp, MII_TG3_CTRL,
1636                              (MII_TG3_CTRL_AS_MASTER |
1637                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1638
1639                 /* Enable SM_DSP_CLOCK and 6dB.  */
1640                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1641
1642                 /* Block the PHY control access.  */
1643                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1644                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1645
1646                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1647                 if (!err)
1648                         break;
1649         } while (--retries);
1650
1651         err = tg3_phy_reset_chanpat(tp);
1652         if (err)
1653                 return err;
1654
1655         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1656         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1657
1658         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1659         tg3_writephy(tp, 0x16, 0x0000);
1660
1661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1663                 /* Set Extended packet length bit for jumbo frames */
1664                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1665         } else {
1667                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1668         }
1669
1670         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1671
1672         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1673                 reg32 &= ~0x3000;
1674                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1675         } else if (!err)
1676                 err = -EBUSY;
1677
1678         return err;
1679 }
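/* Net effect of the loop above: up to ten attempts at forcing the PHY
 * to 1000-full master mode with its transmitter blocked, running the
 * DSP test pattern each time and resetting the PHY again whenever the
 * pattern check fails, before the channels are cleared and the
 * original MII_TG3_CTRL and transmitter state are restored.
 */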
1680
1681 /* Resets the tigon3 PHY and applies the chip-specific workarounds
1682  * that must follow a PHY reset.
1683  */
1684 static int tg3_phy_reset(struct tg3 *tp)
1685 {
1686         u32 cpmuctrl;
1687         u32 phy_status;
1688         int err;
1689
1690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1691                 u32 val;
1692
1693                 val = tr32(GRC_MISC_CFG);
1694                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1695                 udelay(40);
1696         }
1697         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1698         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1699         if (err != 0)
1700                 return -EBUSY;
1701
1702         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1703                 netif_carrier_off(tp->dev);
1704                 tg3_link_report(tp);
1705         }
1706
1707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1710                 err = tg3_phy_reset_5703_4_5(tp);
1711                 if (err)
1712                         return err;
1713                 goto out;
1714         }
1715
1716         cpmuctrl = 0;
1717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1718             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1719                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1720                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1721                         tw32(TG3_CPMU_CTRL,
1722                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1723         }
1724
1725         err = tg3_bmcr_reset(tp);
1726         if (err)
1727                 return err;
1728
1729         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1730                 u32 phy;
1731
1732                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1733                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1734
1735                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1736         }
1737
1738         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1739                 u32 val;
1740
1741                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1742                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1743                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1744                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1745                         udelay(40);
1746                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1747                 }
1748
1749                 /* Disable GPHY autopowerdown. */
1750                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1751                              MII_TG3_MISC_SHDW_WREN |
1752                              MII_TG3_MISC_SHDW_APD_SEL |
1753                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1754         }
1755
1756         tg3_phy_apply_otp(tp);
1757
1758 out:
1759         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1760                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1761                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1762                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1763                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1764                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1765                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1766         }
1767         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1768                 tg3_writephy(tp, 0x1c, 0x8d68);
1769                 tg3_writephy(tp, 0x1c, 0x8d68);
1770         }
1771         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1772                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1773                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1774                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1775                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1776                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1777                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1778                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1779                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1780         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1782                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1783                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1784                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1785                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1786                         tg3_writephy(tp, MII_TG3_TEST1,
1787                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1788                 } else
1789                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1790                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1791         }
1792         /* Set Extended packet length bit (bit 14) on all chips that
1793          * support jumbo frames.  */
1794         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1795                 /* Cannot do read-modify-write on 5401 */
1796                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1797         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1798                 u32 phy_reg;
1799
1800                 /* Set bit 14 with read-modify-write to preserve other bits */
1801                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1802                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1803                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1804         }
1805
1806         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1807          * transmission of jumbo frames.
1808          */
1809         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1810                 u32 phy_reg;
1811
1812                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1813                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1814                                      phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1815         }
1816
1817         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1818                 /* adjust output voltage */
1819                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1820         }
1821
1822         tg3_phy_toggle_automdix(tp, 1);
1823         tg3_phy_set_wirespeed(tp);
1824         return 0;
1825 }
1826
1827 static void tg3_frob_aux_power(struct tg3 *tp)
1828 {
1829         struct tg3 *tp_peer = tp;
1830
1831         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1832                 return;
1833
1834         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1835             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1836                 struct net_device *dev_peer;
1837
1838                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1839                 /* remove_one() may have been run on the peer. */
1840                 if (!dev_peer)
1841                         tp_peer = tp;
1842                 else
1843                         tp_peer = netdev_priv(dev_peer);
1844         }
1845
1846         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1847             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1848             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1849             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1851                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1852                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1853                                     (GRC_LCLCTRL_GPIO_OE0 |
1854                                      GRC_LCLCTRL_GPIO_OE1 |
1855                                      GRC_LCLCTRL_GPIO_OE2 |
1856                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1857                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1858                                     100);
1859                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1860                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1861                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1862                                              GRC_LCLCTRL_GPIO_OE1 |
1863                                              GRC_LCLCTRL_GPIO_OE2 |
1864                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1865                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1866                                              tp->grc_local_ctrl;
1867                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1868
1869                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1870                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1871
1872                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1873                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1874                 } else {
1875                         u32 no_gpio2;
1876                         u32 grc_local_ctrl = 0;
1877
1878                         if (tp_peer != tp &&
1879                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1880                                 return;
1881
1882                         /* Workaround to avoid drawing excess supply current. */
1883                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1884                             ASIC_REV_5714) {
1885                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1886                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1887                                             grc_local_ctrl, 100);
1888                         }
1889
1890                         /* On 5753 and variants, GPIO2 cannot be used. */
1891                         no_gpio2 = tp->nic_sram_data_cfg &
1892                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1893
1894                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1895                                          GRC_LCLCTRL_GPIO_OE1 |
1896                                          GRC_LCLCTRL_GPIO_OE2 |
1897                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1898                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1899                         if (no_gpio2) {
1900                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1901                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1902                         }
1903                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1904                                                     grc_local_ctrl, 100);
1905
1906                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1907
1908                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1909                                                     grc_local_ctrl, 100);
1910
1911                         if (!no_gpio2) {
1912                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1913                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1914                                             grc_local_ctrl, 100);
1915                         }
1916                 }
1917         } else {
1918                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1919                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1920                         if (tp_peer != tp &&
1921                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1922                                 return;
1923
1924                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1925                                     (GRC_LCLCTRL_GPIO_OE1 |
1926                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1927
1928                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1929                                     GRC_LCLCTRL_GPIO_OE1, 100);
1930
1931                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1932                                     (GRC_LCLCTRL_GPIO_OE1 |
1933                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1934                 }
1935         }
1936 }
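/* The GPIO choreography above switches the board to or from its
 * auxiliary (Vaux) supply: if either port of a dual-port device needs
 * WOL or ASF power, the GRC local-control GPIOs are walked through a
 * chip-specific sequence; otherwise GPIO1 is pulsed to release the aux
 * supply.  The peer lookup exists because both ports of a 5704/5714
 * apparently share the same power circuitry.
 */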
1937
1938 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1939 {
1940         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1941                 return 1;
1942         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1943                 if (speed != SPEED_10)
1944                         return 1;
1945         } else if (speed == SPEED_10)
1946                 return 1;
1947
1948         return 0;
1949 }
1950
1951 static int tg3_setup_phy(struct tg3 *, int);
1952
1953 #define RESET_KIND_SHUTDOWN     0
1954 #define RESET_KIND_INIT         1
1955 #define RESET_KIND_SUSPEND      2
1956
1957 static void tg3_write_sig_post_reset(struct tg3 *, int);
1958 static int tg3_halt_cpu(struct tg3 *, u32);
1959 static int tg3_nvram_lock(struct tg3 *);
1960 static void tg3_nvram_unlock(struct tg3 *);
1961
1962 static void tg3_power_down_phy(struct tg3 *tp)
1963 {
1964         u32 val;
1965
1966         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1967                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1968                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1969                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1970
1971                         sg_dig_ctrl |=
1972                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1973                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1974                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1975                 }
1976                 return;
1977         }
1978
1979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1980                 tg3_bmcr_reset(tp);
1981                 val = tr32(GRC_MISC_CFG);
1982                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1983                 udelay(40);
1984                 return;
1985         } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1986                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1987                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1989         }
1990
1991         /* On some chips the PHY must not be powered down, as a
1992          * workaround for hardware bugs.
1993          */
1994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1995             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1996             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1997              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1998                 return;
1999
2000         if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
2001                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2002                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2003                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2004                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2005         }
2006
2007         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2008 }
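/* Ordering notes for the power-down above: serdes devices only have
 * their SG_DIG/serdes config forced, the 5906's EPHY is parked in IDDQ
 * through GRC_MISC_CFG, and only chips without the listed PHY bugs
 * fall through to the final BMCR_PDOWN write that actually powers the
 * copper PHY down.
 */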
2009
2010 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2011 {
2012         u32 misc_host_ctrl;
2013
2014         /* Make sure register accesses (indirect or otherwise)
2015          * will function correctly.
2016          */
2017         pci_write_config_dword(tp->pdev,
2018                                TG3PCI_MISC_HOST_CTRL,
2019                                tp->misc_host_ctrl);
2020
2021         switch (state) {
2022         case PCI_D0:
2023                 pci_enable_wake(tp->pdev, state, false);
2024                 pci_set_power_state(tp->pdev, PCI_D0);
2025
2026                 /* Switch out of Vaux if it is a NIC */
2027                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2028                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2029
2030                 return 0;
2031
2032         case PCI_D1:
2033         case PCI_D2:
2034         case PCI_D3hot:
2035                 break;
2036
2037         default:
2038                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2039                         tp->dev->name, state);
2040                 return -EINVAL;
2041         }
2042         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2043         tw32(TG3PCI_MISC_HOST_CTRL,
2044              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2045
2046         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2047                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2048                     !tp->link_config.phy_is_low_power) {
2049                         struct phy_device *phydev;
2050                         u32 advertising;
2051
2052                         phydev = tp->mdio_bus.phy_map[PHY_ADDR];
2053
2054                         tp->link_config.phy_is_low_power = 1;
2055
2056                         tp->link_config.orig_speed = phydev->speed;
2057                         tp->link_config.orig_duplex = phydev->duplex;
2058                         tp->link_config.orig_autoneg = phydev->autoneg;
2059                         tp->link_config.orig_advertising = phydev->advertising;
2060
2061                         advertising = ADVERTISED_TP |
2062                                       ADVERTISED_Pause |
2063                                       ADVERTISED_Autoneg |
2064                                       ADVERTISED_10baseT_Half;
2065
2066                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2067                             (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
2068                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2069                                         advertising |=
2070                                                 ADVERTISED_100baseT_Half |
2071                                                 ADVERTISED_100baseT_Full |
2072                                                 ADVERTISED_10baseT_Full;
2073                                 else
2074                                         advertising |= ADVERTISED_10baseT_Full;
2075                         }
2076
2077                         phydev->advertising = advertising;
2078
2079                         phy_start_aneg(phydev);
2080                 }
2081         } else {
2082                 if (tp->link_config.phy_is_low_power == 0) {
2083                         tp->link_config.phy_is_low_power = 1;
2084                         tp->link_config.orig_speed = tp->link_config.speed;
2085                         tp->link_config.orig_duplex = tp->link_config.duplex;
2086                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2087                 }
2088
2089                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2090                         tp->link_config.speed = SPEED_10;
2091                         tp->link_config.duplex = DUPLEX_HALF;
2092                         tp->link_config.autoneg = AUTONEG_ENABLE;
2093                         tg3_setup_phy(tp, 0);
2094                 }
2095         }
2096
2097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2098                 u32 val;
2099
2100                 val = tr32(GRC_VCPU_EXT_CTRL);
2101                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2102         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2103                 int i;
2104                 u32 val;
2105
2106                 for (i = 0; i < 200; i++) {
2107                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2108                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2109                                 break;
2110                         msleep(1);
2111                 }
2112         }
2113         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2114                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2115                                                      WOL_DRV_STATE_SHUTDOWN |
2116                                                      WOL_DRV_WOL |
2117                                                      WOL_SET_MAGIC_PKT);
2118
2119         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
2120                 u32 mac_mode;
2121
2122                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2123                         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
2124                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2125                                 udelay(40);
2126                         }
2127
2128                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2129                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2130                         else
2131                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2132
2133                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2134                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2135                             ASIC_REV_5700) {
2136                                 u32 speed = (tp->tg3_flags &
2137                                              TG3_FLAG_WOL_SPEED_100MB) ?
2138                                              SPEED_100 : SPEED_10;
2139                                 if (tg3_5700_link_polarity(tp, speed))
2140                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2141                                 else
2142                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2143                         }
2144                 } else {
2145                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2146                 }
2147
2148                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2149                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2150
2151                 if (pci_pme_capable(tp->pdev, state) &&
2152                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
2153                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2154
2155                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2156                         mac_mode |= tp->mac_mode &
2157                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2158                         if (mac_mode & MAC_MODE_APE_TX_EN)
2159                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2160                 }
2161
2162                 tw32_f(MAC_MODE, mac_mode);
2163                 udelay(100);
2164
2165                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2166                 udelay(10);
2167         }
2168
2169         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2170             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2171              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2172                 u32 base_val;
2173
2174                 base_val = tp->pci_clock_ctrl;
2175                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2176                              CLOCK_CTRL_TXCLK_DISABLE);
2177
2178                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2179                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2180         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2181                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2182                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2183                 /* do nothing */
2184         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2185                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2186                 u32 newbits1, newbits2;
2187
2188                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2190                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2191                                     CLOCK_CTRL_TXCLK_DISABLE |
2192                                     CLOCK_CTRL_ALTCLK);
2193                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2194                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2195                         newbits1 = CLOCK_CTRL_625_CORE;
2196                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2197                 } else {
2198                         newbits1 = CLOCK_CTRL_ALTCLK;
2199                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2200                 }
2201
2202                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2203                             40);
2204
2205                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2206                             40);
2207
2208                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2209                         u32 newbits3;
2210
2211                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2212                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2213                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2214                                             CLOCK_CTRL_TXCLK_DISABLE |
2215                                             CLOCK_CTRL_44MHZ_CORE);
2216                         } else {
2217                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2218                         }
2219
2220                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2221                                     tp->pci_clock_ctrl | newbits3, 40);
2222                 }
2223         }
2224
2225         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
2226             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2227             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2228                 tg3_power_down_phy(tp);
2229
2230         tg3_frob_aux_power(tp);
2231
2232         /* Workaround for unstable PLL clock */
2233         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2234             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2235                 u32 val = tr32(0x7d00);
2236
2237                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2238                 tw32(0x7d00, val);
2239                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2240                         int err;
2241
2242                         err = tg3_nvram_lock(tp);
2243                         tg3_halt_cpu(tp, RX_CPU_BASE);
2244                         if (!err)
2245                                 tg3_nvram_unlock(tp);
2246                 }
2247         }
2248
2249         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2250
2251         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
2252                 pci_enable_wake(tp->pdev, state, true);
2253
2254         /* Finally, set the new power state. */
2255         pci_set_power_state(tp->pdev, state);
2256
2257         return 0;
2258 }
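/* Rough shape of the D1-D3hot path above: mask PCI interrupts,
 * renegotiate the link down to 10Mb (or 100Mb when WOL at that speed
 * is required), arm the WOL mailbox and the MAC's magic-packet filter,
 * gate unused RX/TX/core clocks, power the PHY down when neither WOL,
 * ASF nor APE needs it, switch over to aux power, and only then enter
 * the requested PCI D-state.
 */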
2259
2260 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2261 {
2262         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2263         case MII_TG3_AUX_STAT_10HALF:
2264                 *speed = SPEED_10;
2265                 *duplex = DUPLEX_HALF;
2266                 break;
2267
2268         case MII_TG3_AUX_STAT_10FULL:
2269                 *speed = SPEED_10;
2270                 *duplex = DUPLEX_FULL;
2271                 break;
2272
2273         case MII_TG3_AUX_STAT_100HALF:
2274                 *speed = SPEED_100;
2275                 *duplex = DUPLEX_HALF;
2276                 break;
2277
2278         case MII_TG3_AUX_STAT_100FULL:
2279                 *speed = SPEED_100;
2280                 *duplex = DUPLEX_FULL;
2281                 break;
2282
2283         case MII_TG3_AUX_STAT_1000HALF:
2284                 *speed = SPEED_1000;
2285                 *duplex = DUPLEX_HALF;
2286                 break;
2287
2288         case MII_TG3_AUX_STAT_1000FULL:
2289                 *speed = SPEED_1000;
2290                 *duplex = DUPLEX_FULL;
2291                 break;
2292
2293         default:
2294                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2295                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2296                                  SPEED_10;
2297                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2298                                   DUPLEX_HALF;
2299                         break;
2300                 }
2301                 *speed = SPEED_INVALID;
2302                 *duplex = DUPLEX_INVALID;
2303                 break;
2304         }
2305 }
2306
2307 static void tg3_phy_copper_begin(struct tg3 *tp)
2308 {
2309         u32 new_adv;
2310         int i;
2311
2312         if (tp->link_config.phy_is_low_power) {
2313                 /* Entering low power mode.  Disable gigabit and
2314                  * 100baseT advertisements.
2315                  */
2316                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2317
2318                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2319                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2320                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2321                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2322
2323                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2324         } else if (tp->link_config.speed == SPEED_INVALID) {
2325                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2326                         tp->link_config.advertising &=
2327                                 ~(ADVERTISED_1000baseT_Half |
2328                                   ADVERTISED_1000baseT_Full);
2329
2330                 new_adv = ADVERTISE_CSMA;
2331                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2332                         new_adv |= ADVERTISE_10HALF;
2333                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2334                         new_adv |= ADVERTISE_10FULL;
2335                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2336                         new_adv |= ADVERTISE_100HALF;
2337                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2338                         new_adv |= ADVERTISE_100FULL;
2339
2340                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2341
2342                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2343
2344                 if (tp->link_config.advertising &
2345                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2346                         new_adv = 0;
2347                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2348                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2349                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2350                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2351                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2352                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2353                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2354                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2355                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2356                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2357                 } else {
2358                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2359                 }
2360         } else {
2361                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2362                 new_adv |= ADVERTISE_CSMA;
2363
2364                 /* Asking for a specific link mode. */
2365                 if (tp->link_config.speed == SPEED_1000) {
2366                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2367
2368                         if (tp->link_config.duplex == DUPLEX_FULL)
2369                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2370                         else
2371                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2372                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2373                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2374                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2375                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2376                 } else {
2377                         if (tp->link_config.speed == SPEED_100) {
2378                                 if (tp->link_config.duplex == DUPLEX_FULL)
2379                                         new_adv |= ADVERTISE_100FULL;
2380                                 else
2381                                         new_adv |= ADVERTISE_100HALF;
2382                         } else {
2383                                 if (tp->link_config.duplex == DUPLEX_FULL)
2384                                         new_adv |= ADVERTISE_10FULL;
2385                                 else
2386                                         new_adv |= ADVERTISE_10HALF;
2387                         }
2388                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2389
2390                         new_adv = 0;
2391                 }
2392
2393                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2394         }
2395
2396         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2397             tp->link_config.speed != SPEED_INVALID) {
2398                 u32 bmcr, orig_bmcr;
2399
2400                 tp->link_config.active_speed = tp->link_config.speed;
2401                 tp->link_config.active_duplex = tp->link_config.duplex;
2402
2403                 bmcr = 0;
2404                 switch (tp->link_config.speed) {
2405                 default:
2406                 case SPEED_10:
2407                         break;
2408
2409                 case SPEED_100:
2410                         bmcr |= BMCR_SPEED100;
2411                         break;
2412
2413                 case SPEED_1000:
2414                         bmcr |= TG3_BMCR_SPEED1000;
2415                         break;
2416                 }
2417
2418                 if (tp->link_config.duplex == DUPLEX_FULL)
2419                         bmcr |= BMCR_FULLDPLX;
2420
2421                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2422                     (bmcr != orig_bmcr)) {
2423                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2424                         for (i = 0; i < 1500; i++) {
2425                                 u32 tmp;
2426
2427                                 udelay(10);
2428                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2429                                     tg3_readphy(tp, MII_BMSR, &tmp))
2430                                         continue;
2431                                 if (!(tmp & BMSR_LSTATUS)) {
2432                                         udelay(40);
2433                                         break;
2434                                 }
2435                         }
2436                         tg3_writephy(tp, MII_BMCR, bmcr);
2437                         udelay(40);
2438                 }
2439         } else {
2440                 tg3_writephy(tp, MII_BMCR,
2441                              BMCR_ANENABLE | BMCR_ANRESTART);
2442         }
2443 }
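/* Three advertisement regimes above: low-power (10baseT, plus 100baseT
 * when 100Mb WOL is required), full autonegotiation built from
 * link_config.advertising, and forced speed/duplex -- where the PHY is
 * parked in loopback until the old link drops before the forced BMCR
 * value is written.
 */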
2444
2445 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2446 {
2447         int err;
2448
2449         /* Turn off tap power management and set the Extended packet
2450          * length bit.  */
2451         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2452
2453         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2454         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2455
2456         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2457         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2458
2459         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2460         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2461
2462         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2463         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2464
2465         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2466         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2467
2468         udelay(40);
2469
2470         return err;
2471 }
2472
2473 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2474 {
2475         u32 adv_reg, all_mask = 0;
2476
2477         if (mask & ADVERTISED_10baseT_Half)
2478                 all_mask |= ADVERTISE_10HALF;
2479         if (mask & ADVERTISED_10baseT_Full)
2480                 all_mask |= ADVERTISE_10FULL;
2481         if (mask & ADVERTISED_100baseT_Half)
2482                 all_mask |= ADVERTISE_100HALF;
2483         if (mask & ADVERTISED_100baseT_Full)
2484                 all_mask |= ADVERTISE_100FULL;
2485
2486         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2487                 return 0;
2488
2489         if ((adv_reg & all_mask) != all_mask)
2490                 return 0;
2491         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2492                 u32 tg3_ctrl;
2493
2494                 all_mask = 0;
2495                 if (mask & ADVERTISED_1000baseT_Half)
2496                         all_mask |= ADVERTISE_1000HALF;
2497                 if (mask & ADVERTISED_1000baseT_Full)
2498                         all_mask |= ADVERTISE_1000FULL;
2499
2500                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2501                         return 0;
2502
2503                 if ((tg3_ctrl & all_mask) != all_mask)
2504                         return 0;
2505         }
2506         return 1;
2507 }
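/* Returns 1 only if every capability requested in "mask" is already
 * present in the PHY's advertisement registers; any MDIO read failure
 * counts as "not advertising everything", which makes the caller
 * renegotiate.
 */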
2508
2509 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2510 {
2511         u32 curadv, reqadv;
2512
2513         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2514                 return 1;
2515
2516         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2517         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2518
2519         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2520                 if (curadv != reqadv)
2521                         return 0;
2522
2523                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2524                         tg3_readphy(tp, MII_LPA, rmtadv);
2525         } else {
2526                 /* Reprogram the advertisement register, even if it
2527                  * does not affect the current link.  If the link
2528                  * gets renegotiated in the future, we can save an
2529                  * additional renegotiation cycle by advertising
2530                  * it correctly in the first place.
2531                  */
2532                 if (curadv != reqadv) {
2533                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2534                                      ADVERTISE_PAUSE_ASYM);
2535                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2536                 }
2537         }
2538
2539         return 1;
2540 }
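/* Returns 0 only in the full-duplex case where the pause bits being
 * advertised disagree with link_config.flowctrl, i.e. the one case
 * that requires an immediate renegotiation; the half-duplex path just
 * fixes the advertisement up for next time.
 */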
2541
2542 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2543 {
2544         int current_link_up;
2545         u32 bmsr, dummy;
2546         u32 lcl_adv, rmt_adv;
2547         u16 current_speed;
2548         u8 current_duplex;
2549         int i, err;
2550
2551         tw32(MAC_EVENT, 0);
2552
2553         tw32_f(MAC_STATUS,
2554              (MAC_STATUS_SYNC_CHANGED |
2555               MAC_STATUS_CFG_CHANGED |
2556               MAC_STATUS_MI_COMPLETION |
2557               MAC_STATUS_LNKSTATE_CHANGED));
2558         udelay(40);
2559
2560         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2561                 tw32_f(MAC_MI_MODE,
2562                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2563                 udelay(80);
2564         }
2565
2566         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2567
2568         /* Some third-party PHYs need to be reset on link going
2569          * down.
2570          */
2571         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2572              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2573              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2574             netif_carrier_ok(tp->dev)) {
2575                 tg3_readphy(tp, MII_BMSR, &bmsr);
2576                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2577                     !(bmsr & BMSR_LSTATUS))
2578                         force_reset = 1;
2579         }
2580         if (force_reset)
2581                 tg3_phy_reset(tp);
2582
2583         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2584                 tg3_readphy(tp, MII_BMSR, &bmsr);
2585                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2586                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2587                         bmsr = 0;
2588
2589                 if (!(bmsr & BMSR_LSTATUS)) {
2590                         err = tg3_init_5401phy_dsp(tp);
2591                         if (err)
2592                                 return err;
2593
2594                         tg3_readphy(tp, MII_BMSR, &bmsr);
2595                         for (i = 0; i < 1000; i++) {
2596                                 udelay(10);
2597                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2598                                     (bmsr & BMSR_LSTATUS)) {
2599                                         udelay(40);
2600                                         break;
2601                                 }
2602                         }
2603
2604                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2605                             !(bmsr & BMSR_LSTATUS) &&
2606                             tp->link_config.active_speed == SPEED_1000) {
2607                                 err = tg3_phy_reset(tp);
2608                                 if (!err)
2609                                         err = tg3_init_5401phy_dsp(tp);
2610                                 if (err)
2611                                         return err;
2612                         }
2613                 }
2614         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2615                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2616                 /* 5701 {A0,B0} CRC bug workaround */
2617                 tg3_writephy(tp, 0x15, 0x0a75);
2618                 tg3_writephy(tp, 0x1c, 0x8c68);
2619                 tg3_writephy(tp, 0x1c, 0x8d68);
2620                 tg3_writephy(tp, 0x1c, 0x8c68);
2621         }
2622
2623         /* Clear pending interrupts... */
2624         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2625         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2626
2627         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2628                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2629         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2630                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2631
2632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2634                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2635                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2636                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2637                 else
2638                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2639         }
2640
2641         current_link_up = 0;
2642         current_speed = SPEED_INVALID;
2643         current_duplex = DUPLEX_INVALID;
2644
2645         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2646                 u32 val;
2647
2648                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2649                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2650                 if (!(val & (1 << 10))) {
2651                         val |= (1 << 10);
2652                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2653                         goto relink;
2654                 }
2655         }
2656
2657         bmsr = 0;
2658         for (i = 0; i < 100; i++) {
2659                 tg3_readphy(tp, MII_BMSR, &bmsr);
2660                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2661                     (bmsr & BMSR_LSTATUS))
2662                         break;
2663                 udelay(40);
2664         }
2665
2666         if (bmsr & BMSR_LSTATUS) {
2667                 u32 aux_stat, bmcr;
2668
2669                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2670                 for (i = 0; i < 2000; i++) {
2671                         udelay(10);
2672                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2673                             aux_stat)
2674                                 break;
2675                 }
2676
2677                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2678                                              &current_speed,
2679                                              &current_duplex);
2680
2681                 bmcr = 0;
2682                 for (i = 0; i < 200; i++) {
2683                         tg3_readphy(tp, MII_BMCR, &bmcr);
2684                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2685                                 continue;
2686                         if (bmcr && bmcr != 0x7fff)
2687                                 break;
2688                         udelay(10);
2689                 }
2690
2691                 lcl_adv = 0;
2692                 rmt_adv = 0;
2693
2694                 tp->link_config.active_speed = current_speed;
2695                 tp->link_config.active_duplex = current_duplex;
2696
2697                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2698                         if ((bmcr & BMCR_ANENABLE) &&
2699                             tg3_copper_is_advertising_all(tp,
2700                                                 tp->link_config.advertising)) {
2701                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2702                                                                   &rmt_adv))
2703                                         current_link_up = 1;
2704                         }
2705                 } else {
2706                         if (!(bmcr & BMCR_ANENABLE) &&
2707                             tp->link_config.speed == current_speed &&
2708                             tp->link_config.duplex == current_duplex &&
2709                             tp->link_config.flowctrl ==
2710                             tp->link_config.active_flowctrl) {
2711                                 current_link_up = 1;
2712                         }
2713                 }
2714
2715                 if (current_link_up == 1 &&
2716                     tp->link_config.active_duplex == DUPLEX_FULL)
2717                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2718         }
2719
2720 relink:
2721         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2722                 u32 tmp;
2723
2724                 tg3_phy_copper_begin(tp);
2725
2726                 tg3_readphy(tp, MII_BMSR, &tmp);
2727                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2728                     (tmp & BMSR_LSTATUS))
2729                         current_link_up = 1;
2730         }
2731
2732         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2733         if (current_link_up == 1) {
2734                 if (tp->link_config.active_speed == SPEED_100 ||
2735                     tp->link_config.active_speed == SPEED_10)
2736                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2737                 else
2738                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2739         } else
2740                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2741
2742         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2743         if (tp->link_config.active_duplex == DUPLEX_HALF)
2744                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2745
2746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2747                 if (current_link_up == 1 &&
2748                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2749                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2750                 else
2751                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2752         }
2753
2754         /* Without this setting the Netgear GA302T PHY does not
2755          * send or receive packets; the exact reason is unknown.
2756          */
2757         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2758             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2759                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2760                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2761                 udelay(80);
2762         }
2763
2764         tw32_f(MAC_MODE, tp->mac_mode);
2765         udelay(40);
2766
2767         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2768                 /* Polled via timer. */
2769                 tw32_f(MAC_EVENT, 0);
2770         } else {
2771                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2772         }
2773         udelay(40);
2774
2775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2776             current_link_up == 1 &&
2777             tp->link_config.active_speed == SPEED_1000 &&
2778             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2779              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2780                 udelay(120);
2781                 tw32_f(MAC_STATUS,
2782                      (MAC_STATUS_SYNC_CHANGED |
2783                       MAC_STATUS_CFG_CHANGED));
2784                 udelay(40);
2785                 tg3_write_mem(tp,
2786                               NIC_SRAM_FIRMWARE_MBOX,
2787                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2788         }
2789
2790         if (current_link_up != netif_carrier_ok(tp->dev)) {
2791                 if (current_link_up)
2792                         netif_carrier_on(tp->dev);
2793                 else
2794                         netif_carrier_off(tp->dev);
2795                 tg3_link_report(tp);
2796         }
2797
2798         return 0;
2799 }
2800
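     /* State for the software 1000BASE-X autoneg engine.  The states and
      * MR_* flags mirror the management variables of the IEEE 802.3
      * clause 37 arbitration state machine implemented below.
      */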
2801 struct tg3_fiber_aneginfo {
2802         int state;
2803 #define ANEG_STATE_UNKNOWN              0
2804 #define ANEG_STATE_AN_ENABLE            1
2805 #define ANEG_STATE_RESTART_INIT         2
2806 #define ANEG_STATE_RESTART              3
2807 #define ANEG_STATE_DISABLE_LINK_OK      4
2808 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2809 #define ANEG_STATE_ABILITY_DETECT       6
2810 #define ANEG_STATE_ACK_DETECT_INIT      7
2811 #define ANEG_STATE_ACK_DETECT           8
2812 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2813 #define ANEG_STATE_COMPLETE_ACK         10
2814 #define ANEG_STATE_IDLE_DETECT_INIT     11
2815 #define ANEG_STATE_IDLE_DETECT          12
2816 #define ANEG_STATE_LINK_OK              13
2817 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2818 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2819
2820         u32 flags;
2821 #define MR_AN_ENABLE            0x00000001
2822 #define MR_RESTART_AN           0x00000002
2823 #define MR_AN_COMPLETE          0x00000004
2824 #define MR_PAGE_RX              0x00000008
2825 #define MR_NP_LOADED            0x00000010
2826 #define MR_TOGGLE_TX            0x00000020
2827 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2828 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2829 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2830 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2831 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2832 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2833 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2834 #define MR_TOGGLE_RX            0x00002000
2835 #define MR_NP_RX                0x00004000
2836
2837 #define MR_LINK_OK              0x80000000
2838
2839         unsigned long link_time, cur_time;
2840
2841         u32 ability_match_cfg;
2842         int ability_match_count;
2843
2844         char ability_match, idle_match, ack_match;
2845
2846         u32 txconfig, rxconfig;
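             /* Bits of the config word as seen in MAC_RX_AUTO_NEG: the
              * clause 37 word arrives byte-swapped, which is why e.g.
              * PS1 (bit 7 on the wire) shows up here as 0x8000.
              */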
2847 #define ANEG_CFG_NP             0x00000080
2848 #define ANEG_CFG_ACK            0x00000040
2849 #define ANEG_CFG_RF2            0x00000020
2850 #define ANEG_CFG_RF1            0x00000010
2851 #define ANEG_CFG_PS2            0x00000001
2852 #define ANEG_CFG_PS1            0x00008000
2853 #define ANEG_CFG_HD             0x00004000
2854 #define ANEG_CFG_FD             0x00002000
2855 #define ANEG_CFG_INVAL          0x00001f06
2856
2857 };
2858 #define ANEG_OK         0
2859 #define ANEG_DONE       1
2860 #define ANEG_TIMER_ENAB 2
2861 #define ANEG_FAILED     -1
2862
2863 #define ANEG_STATE_SETTLE_TIME  10000
2864
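     /* Advance the clause 37 state machine by one tick.  Returns
      * ANEG_DONE or ANEG_FAILED once arbitration ends; ANEG_OK and
      * ANEG_TIMER_ENAB both tell the caller to keep ticking.
      */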
2865 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2866                                    struct tg3_fiber_aneginfo *ap)
2867 {
2868         u16 flowctrl;
2869         unsigned long delta;
2870         u32 rx_cfg_reg;
2871         int ret;
2872
2873         if (ap->state == ANEG_STATE_UNKNOWN) {
2874                 ap->rxconfig = 0;
2875                 ap->link_time = 0;
2876                 ap->cur_time = 0;
2877                 ap->ability_match_cfg = 0;
2878                 ap->ability_match_count = 0;
2879                 ap->ability_match = 0;
2880                 ap->idle_match = 0;
2881                 ap->ack_match = 0;
2882         }
2883         ap->cur_time++;
2884
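             /* Sample the incoming config word.  ability_match is only
              * declared after the same word has been seen on consecutive
              * ticks, filtering out words caught mid-change.
              */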
2885         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2886                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2887
2888                 if (rx_cfg_reg != ap->ability_match_cfg) {
2889                         ap->ability_match_cfg = rx_cfg_reg;
2890                         ap->ability_match = 0;
2891                         ap->ability_match_count = 0;
2892                 } else {
2893                         if (++ap->ability_match_count > 1) {
2894                                 ap->ability_match = 1;
2895                                 ap->ability_match_cfg = rx_cfg_reg;
2896                         }
2897                 }
2898                 if (rx_cfg_reg & ANEG_CFG_ACK)
2899                         ap->ack_match = 1;
2900                 else
2901                         ap->ack_match = 0;
2902
2903                 ap->idle_match = 0;
2904         } else {
2905                 ap->idle_match = 1;
2906                 ap->ability_match_cfg = 0;
2907                 ap->ability_match_count = 0;
2908                 ap->ability_match = 0;
2909                 ap->ack_match = 0;
2910
2911                 rx_cfg_reg = 0;
2912         }
2913
2914         ap->rxconfig = rx_cfg_reg;
2915         ret = ANEG_OK;
2916
2917         switch (ap->state) {
2918         case ANEG_STATE_UNKNOWN:
2919                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2920                         ap->state = ANEG_STATE_AN_ENABLE;
2921
2922                 /* fallthru */
2923         case ANEG_STATE_AN_ENABLE:
2924                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2925                 if (ap->flags & MR_AN_ENABLE) {
2926                         ap->link_time = 0;
2927                         ap->cur_time = 0;
2928                         ap->ability_match_cfg = 0;
2929                         ap->ability_match_count = 0;
2930                         ap->ability_match = 0;
2931                         ap->idle_match = 0;
2932                         ap->ack_match = 0;
2933
2934                         ap->state = ANEG_STATE_RESTART_INIT;
2935                 } else {
2936                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2937                 }
2938                 break;
2939
2940         case ANEG_STATE_RESTART_INIT:
2941                 ap->link_time = ap->cur_time;
2942                 ap->flags &= ~(MR_NP_LOADED);
2943                 ap->txconfig = 0;
2944                 tw32(MAC_TX_AUTO_NEG, 0);
2945                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2946                 tw32_f(MAC_MODE, tp->mac_mode);
2947                 udelay(40);
2948
2949                 ret = ANEG_TIMER_ENAB;
2950                 ap->state = ANEG_STATE_RESTART;
2951
2952                 /* fallthru */
2953         case ANEG_STATE_RESTART:
2954                 delta = ap->cur_time - ap->link_time;
2955                 if (delta > ANEG_STATE_SETTLE_TIME) {
2956                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2957                 } else {
2958                         ret = ANEG_TIMER_ENAB;
2959                 }
2960                 break;
2961
2962         case ANEG_STATE_DISABLE_LINK_OK:
2963                 ret = ANEG_DONE;
2964                 break;
2965
2966         case ANEG_STATE_ABILITY_DETECT_INIT:
2967                 ap->flags &= ~(MR_TOGGLE_TX);
2968                 ap->txconfig = ANEG_CFG_FD;
2969                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2970                 if (flowctrl & ADVERTISE_1000XPAUSE)
2971                         ap->txconfig |= ANEG_CFG_PS1;
2972                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2973                         ap->txconfig |= ANEG_CFG_PS2;
2974                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2975                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2976                 tw32_f(MAC_MODE, tp->mac_mode);
2977                 udelay(40);
2978
2979                 ap->state = ANEG_STATE_ABILITY_DETECT;
2980                 break;
2981
2982         case ANEG_STATE_ABILITY_DETECT:
2983                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2984                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2985                 }
2986                 break;
2987
2988         case ANEG_STATE_ACK_DETECT_INIT:
2989                 ap->txconfig |= ANEG_CFG_ACK;
2990                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2991                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2992                 tw32_f(MAC_MODE, tp->mac_mode);
2993                 udelay(40);
2994
2995                 ap->state = ANEG_STATE_ACK_DETECT;
2996
2997                 /* fallthru */
2998         case ANEG_STATE_ACK_DETECT:
2999                 if (ap->ack_match != 0) {
3000                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3001                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3002                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3003                         } else {
3004                                 ap->state = ANEG_STATE_AN_ENABLE;
3005                         }
3006                 } else if (ap->ability_match != 0 &&
3007                            ap->rxconfig == 0) {
3008                         ap->state = ANEG_STATE_AN_ENABLE;
3009                 }
3010                 break;
3011
3012         case ANEG_STATE_COMPLETE_ACK_INIT:
3013                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3014                         ret = ANEG_FAILED;
3015                         break;
3016                 }
3017                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3018                                MR_LP_ADV_HALF_DUPLEX |
3019                                MR_LP_ADV_SYM_PAUSE |
3020                                MR_LP_ADV_ASYM_PAUSE |
3021                                MR_LP_ADV_REMOTE_FAULT1 |
3022                                MR_LP_ADV_REMOTE_FAULT2 |
3023                                MR_LP_ADV_NEXT_PAGE |
3024                                MR_TOGGLE_RX |
3025                                MR_NP_RX);
3026                 if (ap->rxconfig & ANEG_CFG_FD)
3027                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3028                 if (ap->rxconfig & ANEG_CFG_HD)
3029                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3030                 if (ap->rxconfig & ANEG_CFG_PS1)
3031                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3032                 if (ap->rxconfig & ANEG_CFG_PS2)
3033                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3034                 if (ap->rxconfig & ANEG_CFG_RF1)
3035                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3036                 if (ap->rxconfig & ANEG_CFG_RF2)
3037                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3038                 if (ap->rxconfig & ANEG_CFG_NP)
3039                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3040
3041                 ap->link_time = ap->cur_time;
3042
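                     /* Clause 37 toggle handshake: flip our TX toggle and
                      * record the partner's; in this byte-swapped layout
                      * 0x0008 is the Toggle bit (bit 11 on the wire).
                      */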
3043                 ap->flags ^= (MR_TOGGLE_TX);
3044                 if (ap->rxconfig & 0x0008)
3045                         ap->flags |= MR_TOGGLE_RX;
3046                 if (ap->rxconfig & ANEG_CFG_NP)
3047                         ap->flags |= MR_NP_RX;
3048                 ap->flags |= MR_PAGE_RX;
3049
3050                 ap->state = ANEG_STATE_COMPLETE_ACK;
3051                 ret = ANEG_TIMER_ENAB;
3052                 break;
3053
3054         case ANEG_STATE_COMPLETE_ACK:
3055                 if (ap->ability_match != 0 &&
3056                     ap->rxconfig == 0) {
3057                         ap->state = ANEG_STATE_AN_ENABLE;
3058                         break;
3059                 }
3060                 delta = ap->cur_time - ap->link_time;
3061                 if (delta > ANEG_STATE_SETTLE_TIME) {
3062                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3063                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3064                         } else {
3065                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3066                                     !(ap->flags & MR_NP_RX)) {
3067                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3068                                 } else {
3069                                         ret = ANEG_FAILED;
3070                                 }
3071                         }
3072                 }
3073                 break;
3074
3075         case ANEG_STATE_IDLE_DETECT_INIT:
3076                 ap->link_time = ap->cur_time;
3077                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3078                 tw32_f(MAC_MODE, tp->mac_mode);
3079                 udelay(40);
3080
3081                 ap->state = ANEG_STATE_IDLE_DETECT;
3082                 ret = ANEG_TIMER_ENAB;
3083                 break;
3084
3085         case ANEG_STATE_IDLE_DETECT:
3086                 if (ap->ability_match != 0 &&
3087                     ap->rxconfig == 0) {
3088                         ap->state = ANEG_STATE_AN_ENABLE;
3089                         break;
3090                 }
3091                 delta = ap->cur_time - ap->link_time;
3092                 if (delta > ANEG_STATE_SETTLE_TIME) {
3093                         /* XXX another gem from the Broadcom driver :( */
3094                         ap->state = ANEG_STATE_LINK_OK;
3095                 }
3096                 break;
3097
3098         case ANEG_STATE_LINK_OK:
3099                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3100                 ret = ANEG_DONE;
3101                 break;
3102
3103         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3104                 /* ??? unimplemented */
3105                 break;
3106
3107         case ANEG_STATE_NEXT_PAGE_WAIT:
3108                 /* ??? unimplemented */
3109                 break;
3110
3111         default:
3112                 ret = ANEG_FAILED;
3113                 break;
3114         }
3115
3116         return ret;
3117 }
3118
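     /* Run software autoneg to completion: tick the state machine once
      * per microsecond for up to ~195ms (against the 10000-tick settle
      * time), then hand the negotiated flags back to the caller.
      */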
3119 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3120 {
3121         int res = 0;
3122         struct tg3_fiber_aneginfo aninfo;
3123         int status = ANEG_FAILED;
3124         unsigned int tick;
3125         u32 tmp;
3126
3127         tw32_f(MAC_TX_AUTO_NEG, 0);
3128
3129         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3130         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3131         udelay(40);
3132
3133         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3134         udelay(40);
3135
3136         memset(&aninfo, 0, sizeof(aninfo));
3137         aninfo.flags |= MR_AN_ENABLE;
3138         aninfo.state = ANEG_STATE_UNKNOWN;
3139         aninfo.cur_time = 0;
3140         tick = 0;
3141         while (++tick < 195000) {
3142                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3143                 if (status == ANEG_DONE || status == ANEG_FAILED)
3144                         break;
3145
3146                 udelay(1);
3147         }
3148
3149         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3150         tw32_f(MAC_MODE, tp->mac_mode);
3151         udelay(40);
3152
3153         *txflags = aninfo.txconfig;
3154         *rxflags = aninfo.flags;
3155
3156         if (status == ANEG_DONE &&
3157             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3158                              MR_LP_ADV_FULL_DUPLEX)))
3159                 res = 1;
3160
3161         return res;
3162 }
3163
3164 static void tg3_init_bcm8002(struct tg3 *tp)
3165 {
3166         u32 mac_status = tr32(MAC_STATUS);
3167         int i;
3168
3169         /* Reset when initializing the first time or when we have link. */
3170         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3171             !(mac_status & MAC_STATUS_PCS_SYNCED))
3172                 return;
3173
3174         /* Set PLL lock range. */
3175         tg3_writephy(tp, 0x16, 0x8007);
3176
3177         /* SW reset */
3178         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3179
3180         /* Wait for reset to complete. */
3181         /* XXX schedule_timeout() ... */
3182         for (i = 0; i < 500; i++)
3183                 udelay(10);
3184
3185         /* Config mode; select PMA/Ch 1 regs. */
3186         tg3_writephy(tp, 0x10, 0x8411);
3187
3188         /* Enable auto-lock and comdet, select txclk for tx. */
3189         tg3_writephy(tp, 0x11, 0x0a10);
3190
3191         tg3_writephy(tp, 0x18, 0x00a0);
3192         tg3_writephy(tp, 0x16, 0x41ff);
3193
3194         /* Assert and deassert POR. */
3195         tg3_writephy(tp, 0x13, 0x0400);
3196         udelay(40);
3197         tg3_writephy(tp, 0x13, 0x0000);
3198
3199         tg3_writephy(tp, 0x11, 0x0a50);
3200         udelay(40);
3201         tg3_writephy(tp, 0x11, 0x0a10);
3202
3203         /* Wait for signal to stabilize */
3204         /* XXX schedule_timeout() ... */
3205         for (i = 0; i < 15000; i++)
3206                 udelay(10);
3207
3208         /* Deselect the channel register so we can read the PHYID
3209          * later.
3210          */
3211         tg3_writephy(tp, 0x10, 0x8011);
3212 }
3213
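     /* Fiber autoneg on chips whose SerDes has the SG_DIG block, which
      * runs the clause 37 exchange in hardware.  We program the expected
      * control word, apply a MAC_SERDES_CFG workaround (needed on all but
      * the early 5704 revisions), and fall back to parallel detection.
      */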
3214 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3215 {
3216         u16 flowctrl;
3217         u32 sg_dig_ctrl, sg_dig_status;
3218         u32 serdes_cfg, expected_sg_dig_ctrl;
3219         int workaround, port_a;
3220         int current_link_up;
3221
3222         serdes_cfg = 0;
3223         expected_sg_dig_ctrl = 0;
3224         workaround = 0;
3225         port_a = 1;
3226         current_link_up = 0;
3227
3228         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3229             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3230                 workaround = 1;
3231                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3232                         port_a = 0;
3233
3234                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3235                 /* preserve bits 20-23 for voltage regulator */
3236                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3237         }
3238
3239         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3240
3241         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3242                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3243                         if (workaround) {
3244                                 u32 val = serdes_cfg;
3245
3246                                 if (port_a)
3247                                         val |= 0xc010000;
3248                                 else
3249                                         val |= 0x4010000;
3250                                 tw32_f(MAC_SERDES_CFG, val);
3251                         }
3252
3253                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3254                 }
3255                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3256                         tg3_setup_flow_control(tp, 0, 0);
3257                         current_link_up = 1;
3258                 }
3259                 goto out;
3260         }
3261
3262         /* Want auto-negotiation.  */
3263         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3264
3265         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3266         if (flowctrl & ADVERTISE_1000XPAUSE)
3267                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3268         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3269                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3270
3271         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3272                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3273                     tp->serdes_counter &&
3274                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3275                                     MAC_STATUS_RCVD_CFG)) ==
3276                      MAC_STATUS_PCS_SYNCED)) {
3277                         tp->serdes_counter--;
3278                         current_link_up = 1;
3279                         goto out;
3280                 }
3281 restart_autoneg:
3282                 if (workaround)
3283                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3284                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3285                 udelay(5);
3286                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3287
3288                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3289                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3290         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3291                                  MAC_STATUS_SIGNAL_DET)) {
3292                 sg_dig_status = tr32(SG_DIG_STATUS);
3293                 mac_status = tr32(MAC_STATUS);
3294
3295                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3296                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3297                         u32 local_adv = 0, remote_adv = 0;
3298
3299                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3300                                 local_adv |= ADVERTISE_1000XPAUSE;
3301                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3302                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3303
3304                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3305                                 remote_adv |= LPA_1000XPAUSE;
3306                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3307                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3308
3309                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3310                         current_link_up = 1;
3311                         tp->serdes_counter = 0;
3312                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3313                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3314                         if (tp->serdes_counter)
3315                                 tp->serdes_counter--;
3316                         else {
3317                                 if (workaround) {
3318                                         u32 val = serdes_cfg;
3319
3320                                         if (port_a)
3321                                                 val |= 0xc010000;
3322                                         else
3323                                                 val |= 0x4010000;
3324
3325                                         tw32_f(MAC_SERDES_CFG, val);
3326                                 }
3327
3328                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3329                                 udelay(40);
3330
3331                                 /* Link parallel detection - link is up
3332                                  * only if we have PCS_SYNC and not
3333                                  * receiving config code words. */
3334                                 mac_status = tr32(MAC_STATUS);
3335                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3336                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3337                                         tg3_setup_flow_control(tp, 0, 0);
3338                                         current_link_up = 1;
3339                                         tp->tg3_flags2 |=
3340                                                 TG3_FLG2_PARALLEL_DETECT;
3341                                         tp->serdes_counter =
3342                                                 SERDES_PARALLEL_DET_TIMEOUT;
3343                                 } else
3344                                         goto restart_autoneg;
3345                         }
3346                 }
3347         } else {
3348                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3349                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3350         }
3351
3352 out:
3353         return current_link_up;
3354 }
3355
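     /* Fiber autoneg without SG_DIG hardware: run the software clause 37
      * state machine via fiber_autoneg(), or simply force a 1000FD link
      * when autoneg is disabled.
      */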
3356 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3357 {
3358         int current_link_up = 0;
3359
3360         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3361                 goto out;
3362
3363         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3364                 u32 txflags, rxflags;
3365                 int i;
3366
3367                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3368                         u32 local_adv = 0, remote_adv = 0;
3369
3370                         if (txflags & ANEG_CFG_PS1)
3371                                 local_adv |= ADVERTISE_1000XPAUSE;
3372                         if (txflags & ANEG_CFG_PS2)
3373                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3374
3375                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3376                                 remote_adv |= LPA_1000XPAUSE;
3377                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3378                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3379
3380                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3381
3382                         current_link_up = 1;
3383                 }
3384                 for (i = 0; i < 30; i++) {
3385                         udelay(20);
3386                         tw32_f(MAC_STATUS,
3387                                (MAC_STATUS_SYNC_CHANGED |
3388                                 MAC_STATUS_CFG_CHANGED));
3389                         udelay(40);
3390                         if ((tr32(MAC_STATUS) &
3391                              (MAC_STATUS_SYNC_CHANGED |
3392                               MAC_STATUS_CFG_CHANGED)) == 0)
3393                                 break;
3394                 }
3395
3396                 mac_status = tr32(MAC_STATUS);
3397                 if (current_link_up == 0 &&
3398                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3399                     !(mac_status & MAC_STATUS_RCVD_CFG))
3400                         current_link_up = 1;
3401         } else {
3402                 tg3_setup_flow_control(tp, 0, 0);
3403
3404                 /* Forcing 1000FD link up. */
3405                 current_link_up = 1;
3406
3407                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3408                 udelay(40);
3409
3410                 tw32_f(MAC_MODE, tp->mac_mode);
3411                 udelay(40);
3412         }
3413
3414 out:
3415         return current_link_up;
3416 }
3417
3418 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3419 {
3420         u32 orig_pause_cfg;
3421         u16 orig_active_speed;
3422         u8 orig_active_duplex;
3423         u32 mac_status;
3424         int current_link_up;
3425         int i;
3426
3427         orig_pause_cfg = tp->link_config.active_flowctrl;
3428         orig_active_speed = tp->link_config.active_speed;
3429         orig_active_duplex = tp->link_config.active_duplex;
3430
3431         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3432             netif_carrier_ok(tp->dev) &&
3433             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3434                 mac_status = tr32(MAC_STATUS);
3435                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3436                                MAC_STATUS_SIGNAL_DET |
3437                                MAC_STATUS_CFG_CHANGED |
3438                                MAC_STATUS_RCVD_CFG);
3439                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3440                                    MAC_STATUS_SIGNAL_DET)) {
3441                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3442                                             MAC_STATUS_CFG_CHANGED));
3443                         return 0;
3444                 }
3445         }
3446
3447         tw32_f(MAC_TX_AUTO_NEG, 0);
3448
3449         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3450         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3451         tw32_f(MAC_MODE, tp->mac_mode);
3452         udelay(40);
3453
3454         if (tp->phy_id == PHY_ID_BCM8002)
3455                 tg3_init_bcm8002(tp);
3456
3457         /* Enable link change event even when serdes polling.  */
3458         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3459         udelay(40);
3460
3461         current_link_up = 0;
3462         mac_status = tr32(MAC_STATUS);
3463
3464         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3465                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3466         else
3467                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3468
3469         tp->hw_status->status =
3470                 (SD_STATUS_UPDATED |
3471                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3472
3473         for (i = 0; i < 100; i++) {
3474                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3475                                     MAC_STATUS_CFG_CHANGED));
3476                 udelay(5);
3477                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3478                                          MAC_STATUS_CFG_CHANGED |
3479                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3480                         break;
3481         }
3482
3483         mac_status = tr32(MAC_STATUS);
3484         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3485                 current_link_up = 0;
3486                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3487                     tp->serdes_counter == 0) {
3488                         tw32_f(MAC_MODE, (tp->mac_mode |
3489                                           MAC_MODE_SEND_CONFIGS));
3490                         udelay(1);
3491                         tw32_f(MAC_MODE, tp->mac_mode);
3492                 }
3493         }
3494
3495         if (current_link_up == 1) {
3496                 tp->link_config.active_speed = SPEED_1000;
3497                 tp->link_config.active_duplex = DUPLEX_FULL;
3498                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3499                                     LED_CTRL_LNKLED_OVERRIDE |
3500                                     LED_CTRL_1000MBPS_ON));
3501         } else {
3502                 tp->link_config.active_speed = SPEED_INVALID;
3503                 tp->link_config.active_duplex = DUPLEX_INVALID;
3504                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3505                                     LED_CTRL_LNKLED_OVERRIDE |
3506                                     LED_CTRL_TRAFFIC_OVERRIDE));
3507         }
3508
3509         if (current_link_up != netif_carrier_ok(tp->dev)) {
3510                 if (current_link_up)
3511                         netif_carrier_on(tp->dev);
3512                 else
3513                         netif_carrier_off(tp->dev);
3514                 tg3_link_report(tp);
3515         } else {
3516                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3517                 if (orig_pause_cfg != now_pause_cfg ||
3518                     orig_active_speed != tp->link_config.active_speed ||
3519                     orig_active_duplex != tp->link_config.active_duplex)
3520                         tg3_link_report(tp);
3521         }
3522
3523         return 0;
3524 }
3525
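     /* Fiber link setup for SerDes devices programmed through MII-style
      * registers (the 5714S-class parts), so the flow resembles the
      * copper path but advertises the 1000BASE-X bits instead.
      */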
3526 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3527 {
3528         int current_link_up, err = 0;
3529         u32 bmsr, bmcr;
3530         u16 current_speed;
3531         u8 current_duplex;
3532         u32 local_adv, remote_adv;
3533
3534         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3535         tw32_f(MAC_MODE, tp->mac_mode);
3536         udelay(40);
3537
3538         tw32(MAC_EVENT, 0);
3539
3540         tw32_f(MAC_STATUS,
3541              (MAC_STATUS_SYNC_CHANGED |
3542               MAC_STATUS_CFG_CHANGED |
3543               MAC_STATUS_MI_COMPLETION |
3544               MAC_STATUS_LNKSTATE_CHANGED));
3545         udelay(40);
3546
3547         if (force_reset)
3548                 tg3_phy_reset(tp);
3549
3550         current_link_up = 0;
3551         current_speed = SPEED_INVALID;
3552         current_duplex = DUPLEX_INVALID;
3553
3554         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3555         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3557                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3558                         bmsr |= BMSR_LSTATUS;
3559                 else
3560                         bmsr &= ~BMSR_LSTATUS;
3561         }
3562
3563         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3564
3565         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3566             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3567                 /* do nothing, just check for link up at the end */
3568         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3569                 u32 adv, new_adv;
3570
3571                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3572                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3573                                   ADVERTISE_1000XPAUSE |
3574                                   ADVERTISE_1000XPSE_ASYM |
3575                                   ADVERTISE_SLCT);
3576
3577                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3578
3579                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3580                         new_adv |= ADVERTISE_1000XHALF;
3581                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3582                         new_adv |= ADVERTISE_1000XFULL;
3583
3584                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3585                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3586                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3587                         tg3_writephy(tp, MII_BMCR, bmcr);
3588
3589                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3590                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3591                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3592
3593                         return err;
3594                 }
3595         } else {
3596                 u32 new_bmcr;
3597
3598                 bmcr &= ~BMCR_SPEED1000;
3599                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3600
3601                 if (tp->link_config.duplex == DUPLEX_FULL)
3602                         new_bmcr |= BMCR_FULLDPLX;
3603
3604                 if (new_bmcr != bmcr) {
3605                         /* BMCR_SPEED1000 is a reserved bit that needs
3606                          * to be set on write.
3607                          */
3608                         new_bmcr |= BMCR_SPEED1000;
3609
3610                         /* Force a linkdown */
3611                         if (netif_carrier_ok(tp->dev)) {
3612                                 u32 adv;
3613
3614                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3615                                 adv &= ~(ADVERTISE_1000XFULL |
3616                                          ADVERTISE_1000XHALF |
3617                                          ADVERTISE_SLCT);
3618                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3619                                 tg3_writephy(tp, MII_BMCR, bmcr |
3620                                                            BMCR_ANRESTART |
3621                                                            BMCR_ANENABLE);
3622                                 udelay(10);
3623                                 netif_carrier_off(tp->dev);
3624                         }
3625                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3626                         bmcr = new_bmcr;
3627                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3628                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3629                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3630                             ASIC_REV_5714) {
3631                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3632                                         bmsr |= BMSR_LSTATUS;
3633                                 else
3634                                         bmsr &= ~BMSR_LSTATUS;
3635                         }
3636                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3637                 }
3638         }
3639
3640         if (bmsr & BMSR_LSTATUS) {
3641                 current_speed = SPEED_1000;
3642                 current_link_up = 1;
3643                 if (bmcr & BMCR_FULLDPLX)
3644                         current_duplex = DUPLEX_FULL;
3645                 else
3646                         current_duplex = DUPLEX_HALF;
3647
3648                 local_adv = 0;
3649                 remote_adv = 0;
3650
3651                 if (bmcr & BMCR_ANENABLE) {
3652                         u32 common;
3653
3654                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3655                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3656                         common = local_adv & remote_adv;
3657                         if (common & (ADVERTISE_1000XHALF |
3658                                       ADVERTISE_1000XFULL)) {
3659                                 if (common & ADVERTISE_1000XFULL)
3660                                         current_duplex = DUPLEX_FULL;
3661                                 else
3662                                         current_duplex = DUPLEX_HALF;
3663                         } else
3664                                 current_link_up = 0;
3666                 }
3667         }
3668
3669         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3670                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3671
3672         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3673         if (tp->link_config.active_duplex == DUPLEX_HALF)
3674                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3675
3676         tw32_f(MAC_MODE, tp->mac_mode);
3677         udelay(40);
3678
3679         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3680
3681         tp->link_config.active_speed = current_speed;
3682         tp->link_config.active_duplex = current_duplex;
3683
3684         if (current_link_up != netif_carrier_ok(tp->dev)) {
3685                 if (current_link_up)
3686                         netif_carrier_on(tp->dev);
3687                 else {
3688                         netif_carrier_off(tp->dev);
3689                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3690                 }
3691                 tg3_link_report(tp);
3692         }
3693         return err;
3694 }
3695
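     /* Periodic fallback for MII SerDes: once autoneg has had time to
      * finish, force the link up by parallel detection when we have
      * signal detect but no config code words, and hand control back to
      * autoneg if config words reappear.
      */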
3696 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3697 {
3698         if (tp->serdes_counter) {
3699                 /* Give autoneg time to complete. */
3700                 tp->serdes_counter--;
3701                 return;
3702         }
3703         if (!netif_carrier_ok(tp->dev) &&
3704             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3705                 u32 bmcr;
3706
3707                 tg3_readphy(tp, MII_BMCR, &bmcr);
3708                 if (bmcr & BMCR_ANENABLE) {
3709                         u32 phy1, phy2;
3710
3711                         /* Select shadow register 0x1f */
3712                         tg3_writephy(tp, 0x1c, 0x7c00);
3713                         tg3_readphy(tp, 0x1c, &phy1);
3714
3715                         /* Select expansion interrupt status register */
3716                         tg3_writephy(tp, 0x17, 0x0f01);
3717                         tg3_readphy(tp, 0x15, &phy2);
3718                         tg3_readphy(tp, 0x15, &phy2);
3719
3720                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3721                                 /* We have signal detect and not receiving
3722                                  * config code words, link is up by parallel
3723                                  * detection.
3724                                  */
3725
3726                                 bmcr &= ~BMCR_ANENABLE;
3727                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3728                                 tg3_writephy(tp, MII_BMCR, bmcr);
3729                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3730                         }
3731                 }
3732         } else if (netif_carrier_ok(tp->dev) &&
3733                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3734                    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3736                 u32 phy2;
3737
3738                 /* Select expansion interrupt status register */
3739                 tg3_writephy(tp, 0x17, 0x0f01);
3740                 tg3_readphy(tp, 0x15, &phy2);
3741                 if (phy2 & 0x20) {
3742                         u32 bmcr;
3743
3744                         /* Config code words received, turn on autoneg. */
3745                         tg3_readphy(tp, MII_BMCR, &bmcr);
3746                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3747
3748                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3750                 }
3751         }
3752 }
3753
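     /* Top-level link setup: dispatch to the fiber, MII-SerDes or copper
      * handler, then reprogram the MAC state that depends on the result
      * (clock prescaler, TX lengths, stats coalescing, ASPM threshold).
      */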
3754 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3755 {
3756         int err;
3757
3758         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3759                 err = tg3_setup_fiber_phy(tp, force_reset);
3760         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3761                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3762         } else {
3763                 err = tg3_setup_copper_phy(tp, force_reset);
3764         }
3765
3766         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3767             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3768                 u32 val, scale;
3769
3770                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3771                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3772                         scale = 65;
3773                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3774                         scale = 6;
3775                 else
3776                         scale = 12;
3777
3778                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3779                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3780                 tw32(GRC_MISC_CFG, val);
3781         }
3782
3783         if (tp->link_config.active_speed == SPEED_1000 &&
3784             tp->link_config.active_duplex == DUPLEX_HALF)
3785                 tw32(MAC_TX_LENGTHS,
3786                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3787                       (6 << TX_LENGTHS_IPG_SHIFT) |
3788                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3789         else
3790                 tw32(MAC_TX_LENGTHS,
3791                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3792                       (6 << TX_LENGTHS_IPG_SHIFT) |
3793                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3794
3795         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3796                 if (netif_carrier_ok(tp->dev)) {
3797                         tw32(HOSTCC_STAT_COAL_TICKS,
3798                              tp->coal.stats_block_coalesce_usecs);
3799                 } else {
3800                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3801                 }
3802         }
3803
3804         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3805                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3806                 if (!netif_carrier_ok(tp->dev))
3807                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3808                               tp->pwrmgmt_thresh;
3809                 else
3810                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3811                 tw32(PCIE_PWR_MGMT_THRESH, val);
3812         }
3813
3814         return err;
3815 }
3816
3817 /* This is called whenever we suspect that the system chipset is re-
3818  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3819  * is bogus tx completions. We try to recover by setting the
3820  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3821  * in the workqueue.
3822  */
3823 static void tg3_tx_recover(struct tg3 *tp)
3824 {
3825         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3826                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3827
3828         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3829                "mapped I/O cycles to the network device, attempting to "
3830                "recover. Please report the problem to the driver maintainer "
3831                "and include system chipset information.\n", tp->dev->name);
3832
3833         spin_lock(&tp->lock);
3834         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3835         spin_unlock(&tp->lock);
3836 }
3837
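     /* Free TX slots = configured pending count minus entries in flight.
      * Example, 512-entry ring with tx_pending == 511: tx_prod == 10 and
      * tx_cons == 500 gives (10 - 500) & 511 == 22 in flight, 489 free.
      */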
3838 static inline u32 tg3_tx_avail(struct tg3 *tp)
3839 {
3840         smp_mb();
3841         return (tp->tx_pending -
3842                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3843 }
3844
3845 /* Tigon3 never reports partial packet sends.  So we do not
3846  * need special logic to handle SKBs that have not had all
3847  * of their frags sent yet, like SunGEM does.
3848  */
3849 static void tg3_tx(struct tg3 *tp)
3850 {
3851         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3852         u32 sw_idx = tp->tx_cons;
3853
3854         while (sw_idx != hw_idx) {
3855                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3856                 struct sk_buff *skb = ri->skb;
3857                 int i, tx_bug = 0;
3858
3859                 if (unlikely(skb == NULL)) {
3860                         tg3_tx_recover(tp);
3861                         return;
3862                 }
3863
3864                 pci_unmap_single(tp->pdev,
3865                                  pci_unmap_addr(ri, mapping),
3866                                  skb_headlen(skb),
3867                                  PCI_DMA_TODEVICE);
3868
3869                 ri->skb = NULL;
3870
3871                 sw_idx = NEXT_TX(sw_idx);
3872
3873                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3874                         ri = &tp->tx_buffers[sw_idx];
3875                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3876                                 tx_bug = 1;
3877
3878                         pci_unmap_page(tp->pdev,
3879                                        pci_unmap_addr(ri, mapping),
3880                                        skb_shinfo(skb)->frags[i].size,
3881                                        PCI_DMA_TODEVICE);
3882
3883                         sw_idx = NEXT_TX(sw_idx);
3884                 }
3885
3886                 dev_kfree_skb(skb);
3887
3888                 if (unlikely(tx_bug)) {
3889                         tg3_tx_recover(tp);
3890                         return;
3891                 }
3892         }
3893
3894         tp->tx_cons = sw_idx;
3895
3896         /* Need to make the tx_cons update visible to tg3_start_xmit()
3897          * before checking for netif_queue_stopped().  Without the
3898          * memory barrier, there is a small possibility that tg3_start_xmit()
3899          * will miss it and cause the queue to be stopped forever.
3900          */
3901         smp_mb();
3902
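             /* Classic double-check wakeup: test cheaply outside the lock,
              * then re-test under netif_tx_lock so we cannot race with
              * tg3_start_xmit() stopping the queue.
              */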
3903         if (unlikely(netif_queue_stopped(tp->dev) &&
3904                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3905                 netif_tx_lock(tp->dev);
3906                 if (netif_queue_stopped(tp->dev) &&
3907                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3908                         netif_wake_queue(tp->dev);
3909                 netif_tx_unlock(tp->dev);
3910         }
3911 }
3912
3913 /* Returns size of skb allocated or < 0 on error.
3914  *
3915  * We only need to fill in the address because the other members
3916  * of the RX descriptor are invariant, see tg3_init_rings.
3917  *
3918  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3919  * posting buffers we only dirty the first cache line of the RX
3920  * descriptor (containing the address).  Whereas for the RX status
3921  * buffers the cpu only reads the last cacheline of the RX descriptor
3922  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3923  */
3924 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3925                             int src_idx, u32 dest_idx_unmasked)
3926 {
3927         struct tg3_rx_buffer_desc *desc;
3928         struct ring_info *map, *src_map;
3929         struct sk_buff *skb;
3930         dma_addr_t mapping;
3931         int skb_size, dest_idx;
3932
3933         src_map = NULL;
3934         switch (opaque_key) {
3935         case RXD_OPAQUE_RING_STD:
3936                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3937                 desc = &tp->rx_std[dest_idx];
3938                 map = &tp->rx_std_buffers[dest_idx];
3939                 if (src_idx >= 0)
3940                         src_map = &tp->rx_std_buffers[src_idx];
3941                 skb_size = tp->rx_pkt_buf_sz;
3942                 break;
3943
3944         case RXD_OPAQUE_RING_JUMBO:
3945                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3946                 desc = &tp->rx_jumbo[dest_idx];
3947                 map = &tp->rx_jumbo_buffers[dest_idx];
3948                 if (src_idx >= 0)
3949                         src_map = &tp->rx_jumbo_buffers[src_idx];
3950                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3951                 break;
3952
3953         default:
3954                 return -EINVAL;
3955         }
3956
3957         /* Do not overwrite any of the map or rp information
3958          * until we are sure we can commit to a new buffer.
3959          *
3960          * Callers depend upon this behavior and assume that
3961          * we leave everything unchanged if we fail.
3962          */
3963         skb = netdev_alloc_skb(tp->dev, skb_size);
3964         if (skb == NULL)
3965                 return -ENOMEM;
3966
3967         skb_reserve(skb, tp->rx_offset);
3968
3969         mapping = pci_map_single(tp->pdev, skb->data,
3970                                  skb_size - tp->rx_offset,
3971                                  PCI_DMA_FROMDEVICE);
3972
3973         map->skb = skb;
3974         pci_unmap_addr_set(map, mapping, mapping);
3975
3976         if (src_map != NULL)
3977                 src_map->skb = NULL;
3978
3979         desc->addr_hi = ((u64)mapping >> 32);
3980         desc->addr_lo = ((u64)mapping & 0xffffffff);
3981
3982         return skb_size;
3983 }
3984
3985 /* We only need to move over in the address because the other
3986  * members of the RX descriptor are invariant.  See notes above
3987  * tg3_alloc_rx_skb for full details.
3988  */
3989 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3990                            int src_idx, u32 dest_idx_unmasked)
3991 {
3992         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3993         struct ring_info *src_map, *dest_map;
3994         int dest_idx;
3995
3996         switch (opaque_key) {
3997         case RXD_OPAQUE_RING_STD:
3998                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3999                 dest_desc = &tp->rx_std[dest_idx];
4000                 dest_map = &tp->rx_std_buffers[dest_idx];
4001                 src_desc = &tp->rx_std[src_idx];
4002                 src_map = &tp->rx_std_buffers[src_idx];
4003                 break;
4004
4005         case RXD_OPAQUE_RING_JUMBO:
4006                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4007                 dest_desc = &tp->rx_jumbo[dest_idx];
4008                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4009                 src_desc = &tp->rx_jumbo[src_idx];
4010                 src_map = &tp->rx_jumbo_buffers[src_idx];
4011                 break;
4012
4013         default:
4014                 return;
4015         }
4016
4017         dest_map->skb = src_map->skb;
4018         pci_unmap_addr_set(dest_map, mapping,
4019                            pci_unmap_addr(src_map, mapping));
4020         dest_desc->addr_hi = src_desc->addr_hi;
4021         dest_desc->addr_lo = src_desc->addr_lo;
4022
4023         src_map->skb = NULL;
4024 }
4025
4026 #if TG3_VLAN_TAG_USED
4027 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4028 {
4029         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4030 }
4031 #endif
4032
4033 /* The RX ring scheme is composed of multiple rings which post fresh
4034  * buffers to the chip, and one special ring the chip uses to report
4035  * status back to the host.
4036  *
4037  * The special ring reports the status of received packets to the
4038  * host.  The chip does not write into the original descriptor the
4039  * RX buffer was obtained from.  The chip simply takes the original
4040  * descriptor as provided by the host, updates the status and length
4041  * field, then writes this into the next status ring entry.
4042  *
4043  * Each ring the host uses to post buffers to the chip is described
4044  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4045  * it is first placed into the on-chip ram.  When the packet's length
4046  * is known, it walks down the TG3_BDINFO entries to select the ring.
4047  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4048  * which is within the range of the new packet's length is chosen.
4049  *
4050  * The "separate ring for rx status" scheme may sound queer, but it makes
4051  * sense from a cache coherency perspective.  If only the host writes
4052  * to the buffer post rings, and only the chip writes to the rx status
4053  * rings, then cache lines never move beyond shared-modified state.
4054  * If both the host and chip were to write into the same ring, cache line
4055  * eviction could occur since both entities want it in an exclusive state.
4056  */
4057 static int tg3_rx(struct tg3 *tp, int budget)
4058 {
4059         u32 work_mask, rx_std_posted = 0;
4060         u32 sw_idx = tp->rx_rcb_ptr;
4061         u16 hw_idx;
4062         int received;
4063
4064         hw_idx = tp->hw_status->idx[0].rx_producer;
4065         /*
4066          * We need to order the read of hw_idx and the read of
4067          * the opaque cookie.
4068          */
4069         rmb();
4070         work_mask = 0;
4071         received = 0;
4072         while (sw_idx != hw_idx && budget > 0) {
4073                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4074                 unsigned int len;
4075                 struct sk_buff *skb;
4076                 dma_addr_t dma_addr;
4077                 u32 opaque_key, desc_idx, *post_ptr;
4078
4079                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4080                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4081                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4082                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4083                                                   mapping);
4084                         skb = tp->rx_std_buffers[desc_idx].skb;
4085                         post_ptr = &tp->rx_std_ptr;
4086                         rx_std_posted++;
4087                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4088                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4089                                                   mapping);
4090                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4091                         post_ptr = &tp->rx_jumbo_ptr;
4092                 } else {
4093                         goto next_pkt_nopost;
4094                 }
4096
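                     /* Note which producer rings were consumed so that only
                      * the mailboxes that actually changed get rung after
                      * the loop.
                      */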
4097                 work_mask |= opaque_key;
4098
4099                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4100                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4101                 drop_it:
4102                         tg3_recycle_rx(tp, opaque_key,
4103                                        desc_idx, *post_ptr);
4104                 drop_it_no_recycle:
4105                         /* Other statistics are tracked by the card itself. */
4106                         tp->net_stats.rx_dropped++;
4107                         goto next_pkt;
4108                 }
4109
4110                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4111
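                /* Copybreak decision: frames longer than RX_COPY_THRESHOLD
                 * are handed to the stack in the original mapped buffer and
                 * a fresh replacement is posted; shorter frames are copied
                 * into a small skb so the original buffer can be recycled
                 * in place (the else branch below).
                 */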
4112                 if (len > RX_COPY_THRESHOLD &&
4113                     tp->rx_offset == 2) {
4114                         /* rx_offset != 2 iff this is a 5701 card running
4115                          * in PCI-X mode [see tg3_get_invariants()].
4116                          */
4117                         int skb_size;
4118
4119                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4120                                                     desc_idx, *post_ptr);
4121                         if (skb_size < 0)
4122                                 goto drop_it;
4123
4124                         pci_unmap_single(tp->pdev, dma_addr,
4125                                          skb_size - tp->rx_offset,
4126                                          PCI_DMA_FROMDEVICE);
4127
4128                         skb_put(skb, len);
4129                 } else {
4130                         struct sk_buff *copy_skb;
4131
4132                         tg3_recycle_rx(tp, opaque_key,
4133                                        desc_idx, *post_ptr);
4134
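                        /* The "+ 2" and the skb_reserve(copy_skb, 2) below
                         * keep the IP header 16-byte aligned behind the
                         * 14-byte Ethernet header (NET_IP_ALIGN-style
                         * padding).
                         */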
4135                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4136                         if (copy_skb == NULL)
4137                                 goto drop_it_no_recycle;
4138
4139                         skb_reserve(copy_skb, 2);
4140                         skb_put(copy_skb, len);
4141                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4142                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4143                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4144
4145                         /* We'll reuse the original ring buffer. */
4146                         skb = copy_skb;
4147                 }
4148
4149                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4150                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4151                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4152                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4153                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4154                 else
4155                         skb->ip_summed = CHECKSUM_NONE;
4156
4157                 skb->protocol = eth_type_trans(skb, tp->dev);
4158 #if TG3_VLAN_TAG_USED
4159                 if (tp->vlgrp != NULL &&
4160                     desc->type_flags & RXD_FLAG_VLAN) {
4161                         tg3_vlan_rx(tp, skb,
4162                                     desc->err_vlan & RXD_VLAN_MASK);
4163                 } else
4164 #endif
4165                         netif_receive_skb(skb);
4166
4167                 tp->dev->last_rx = jiffies;
4168                 received++;
4169                 budget--;
4170
4171 next_pkt:
4172                 (*post_ptr)++;
4173
4174                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4175                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4176
4177                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4178                                      TG3_64BIT_REG_LOW, idx);
4179                         work_mask &= ~RXD_OPAQUE_RING_STD;
4180                         rx_std_posted = 0;
4181                 }
4182 next_pkt_nopost:
4183                 sw_idx++;
4184                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4185
4186                 /* Refresh hw_idx to see if there is new work */
4187                 if (sw_idx == hw_idx) {
4188                         hw_idx = tp->hw_status->idx[0].rx_producer;
4189                         rmb();
4190                 }
4191         }
4192
4193         /* ACK the status ring. */
4194         tp->rx_rcb_ptr = sw_idx;
4195         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4196
4197         /* Refill RX ring(s). */
4198         if (work_mask & RXD_OPAQUE_RING_STD) {
4199                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4200                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4201                              sw_idx);
4202         }
4203         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4204                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4205                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4206                              sw_idx);
4207         }
4208         mmiowb();
4209
4210         return received;
4211 }
4212
4213 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4214 {
4215         struct tg3_hw_status *sblk = tp->hw_status;
4216
4217         /* handle link change and other phy events */
4218         if (!(tp->tg3_flags &
4219               (TG3_FLAG_USE_LINKCHG_REG |
4220                TG3_FLAG_POLL_SERDES))) {
4221                 if (sblk->status & SD_STATUS_LINK_CHG) {
4222                         sblk->status = SD_STATUS_UPDATED |
4223                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4224                         spin_lock(&tp->lock);
4225                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4226                                 tw32_f(MAC_STATUS,
4227                                      (MAC_STATUS_SYNC_CHANGED |
4228                                       MAC_STATUS_CFG_CHANGED |
4229                                       MAC_STATUS_MI_COMPLETION |
4230                                       MAC_STATUS_LNKSTATE_CHANGED));
4231                                 udelay(40);
4232                         } else
4233                                 tg3_setup_phy(tp, 0);
4234                         spin_unlock(&tp->lock);
4235                 }
4236         }
4237
4238         /* run TX completion thread */
4239         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4240                 tg3_tx(tp);
4241                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4242                         return work_done;
4243         }
4244
4245         /* run RX thread, within the bounds set by NAPI.
4246          * All RX "locking" is done by ensuring outside
4247          * code synchronizes with tg3->napi.poll()
4248          */
4249         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4250                 work_done += tg3_rx(tp, budget - work_done);
4251
4252         return work_done;
4253 }
4254
4255 static int tg3_poll(struct napi_struct *napi, int budget)
4256 {
4257         struct tg3 *tp = container_of(napi, struct tg3, napi);
4258         int work_done = 0;
4259         struct tg3_hw_status *sblk = tp->hw_status;
4260
4261         while (1) {
4262                 work_done = tg3_poll_work(tp, work_done, budget);
4263
4264                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4265                         goto tx_recovery;
4266
4267                 if (unlikely(work_done >= budget))
4268                         break;
4269
4270                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4271                         /* tp->last_tag is used in tg3_restart_ints() below
4272                          * to tell the hw how much work has been processed,
4273                          * so we must read it before checking for more work.
4274                          */
4275                         tp->last_tag = sblk->status_tag;
4276                         rmb();
4277                 } else
4278                         sblk->status &= ~SD_STATUS_UPDATED;
4279
4280                 if (likely(!tg3_has_work(tp))) {
4281                         netif_rx_complete(tp->dev, napi);
4282                         tg3_restart_ints(tp);
4283                         break;
4284                 }
4285         }
4286
4287         return work_done;
4288
4289 tx_recovery:
4290         /* work_done is guaranteed to be less than budget. */
4291         netif_rx_complete(tp->dev, napi);
4292         schedule_work(&tp->reset_task);
4293         return work_done;
4294 }
4295
4296 static void tg3_irq_quiesce(struct tg3 *tp)
4297 {
4298         BUG_ON(tp->irq_sync);
4299
4300         tp->irq_sync = 1;
4301         smp_mb();
4302
4303         synchronize_irq(tp->pdev->irq);
4304 }
4305
4306 static inline int tg3_irq_sync(struct tg3 *tp)
4307 {
4308         return tp->irq_sync;
4309 }
4310
4311 /* Fully shut down all tg3 driver activity elsewhere in the system.
4312  * If irq_sync is non-zero, the IRQ handler must be synchronized with
4313  * as well.  This is usually only necessary when shutting down
4314  * the device.
4315  */
4316 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4317 {
4318         spin_lock_bh(&tp->lock);
4319         if (irq_sync)
4320                 tg3_irq_quiesce(tp);
4321 }
4322
4323 static inline void tg3_full_unlock(struct tg3 *tp)
4324 {
4325         spin_unlock_bh(&tp->lock);
4326 }
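/* Minimal usage sketch of the helpers above (illustrative only):
 *
 *	tg3_full_lock(tp, 1);	// also quiesces the IRQ handler
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);
 *
 * With irq_sync non-zero, tg3_irq_quiesce() sets tp->irq_sync and then
 * waits in synchronize_irq() for any handler already in flight.
 */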
4327
4328 /* One-shot MSI handler - Chip automatically disables interrupt
4329  * after sending MSI so driver doesn't have to do it.
4330  */
4331 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4332 {
4333         struct net_device *dev = dev_id;
4334         struct tg3 *tp = netdev_priv(dev);
4335
4336         prefetch(tp->hw_status);
4337         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4338
4339         if (likely(!tg3_irq_sync(tp)))
4340                 netif_rx_schedule(dev, &tp->napi);
4341
4342         return IRQ_HANDLED;
4343 }
4344
4345 /* MSI ISR - No need to check for interrupt sharing and no need to
4346  * flush status block and interrupt mailbox. PCI ordering rules
4347  * guarantee that MSI will arrive after the status block.
4348  */
4349 static irqreturn_t tg3_msi(int irq, void *dev_id)
4350 {
4351         struct net_device *dev = dev_id;
4352         struct tg3 *tp = netdev_priv(dev);
4353
4354         prefetch(tp->hw_status);
4355         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4356         /*
4357          * Writing any value to intr-mbox-0 clears PCI INTA# and
4358          * chip-internal interrupt pending events.
4359          * Writing non-zero to intr-mbox-0 additionally tells the
4360          * NIC to stop sending us irqs, engaging "in-intr-handler"
4361          * event coalescing.
4362          */
4363         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4364         if (likely(!tg3_irq_sync(tp)))
4365                 netif_rx_schedule(dev, &tp->napi);
4366
4367         return IRQ_RETVAL(1);
4368 }
4369
4370 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4371 {
4372         struct net_device *dev = dev_id;
4373         struct tg3 *tp = netdev_priv(dev);
4374         struct tg3_hw_status *sblk = tp->hw_status;
4375         unsigned int handled = 1;
4376
4377         /* In INTx mode, it is possible for the interrupt to arrive at
4378          * the CPU before the status block write posted just prior to it.
4379          * Reading the PCI State register will confirm whether the
4380          * interrupt is ours and will flush the status block.
4381          */
4382         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4383                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4384                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4385                         handled = 0;
4386                         goto out;
4387                 }
4388         }
4389
4390         /*
4391          * Writing any value to intr-mbox-0 clears PCI INTA# and
4392          * chip-internal interrupt pending events.
4393          * Writing non-zero to intr-mbox-0 additionally tells the
4394          * NIC to stop sending us irqs, engaging "in-intr-handler"
4395          * event coalescing.
4396          *
4397          * Flush the mailbox to de-assert the IRQ immediately to prevent
4398          * spurious interrupts.  The flush impacts performance but
4399          * excessive spurious interrupts can be worse in some cases.
4400          */
4401         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4402         if (tg3_irq_sync(tp))
4403                 goto out;
4404         sblk->status &= ~SD_STATUS_UPDATED;
4405         if (likely(tg3_has_work(tp))) {
4406                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4407                 netif_rx_schedule(dev, &tp->napi);
4408         } else {
4409                 /* No work, shared interrupt perhaps?  re-enable
4410                  * interrupts, and flush that PCI write
4411                  */
4412                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4413                                0x00000000);
4414         }
4415 out:
4416         return IRQ_RETVAL(handled);
4417 }
4418
4419 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4420 {
4421         struct net_device *dev = dev_id;
4422         struct tg3 *tp = netdev_priv(dev);
4423         struct tg3_hw_status *sblk = tp->hw_status;
4424         unsigned int handled = 1;
4425
4426         /* In INTx mode, it is possible for the interrupt to arrive at
4427          * the CPU before the status block write posted just prior to it.
4428          * Reading the PCI State register will confirm whether the
4429          * interrupt is ours and will flush the status block.
4430          */
4431         if (unlikely(sblk->status_tag == tp->last_tag)) {
4432                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4433                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4434                         handled = 0;
4435                         goto out;
4436                 }
4437         }
4438
4439         /*
4440          * Writing any value to intr-mbox-0 clears PCI INTA# and
4441          * chip-internal interrupt pending events.
4442          * Writing non-zero to intr-mbox-0 additionally tells the
4443          * NIC to stop sending us irqs, engaging "in-intr-handler"
4444          * event coalescing.
4445          *
4446          * Flush the mailbox to de-assert the IRQ immediately to prevent
4447          * spurious interrupts.  The flush impacts performance but
4448          * excessive spurious interrupts can be worse in some cases.
4449          */
4450         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4451         if (tg3_irq_sync(tp))
4452                 goto out;
4453         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4454                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4455                 /* Update last_tag to mark that this status has been
4456                  * seen. Because interrupt may be shared, we may be
4457                  * racing with tg3_poll(), so only update last_tag
4458                  * if tg3_poll() is not scheduled.
4459                  */
4460                 tp->last_tag = sblk->status_tag;
4461                 __netif_rx_schedule(dev, &tp->napi);
4462         }
4463 out:
4464         return IRQ_RETVAL(handled);
4465 }
4466
4467 /* ISR for interrupt test */
4468 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4469 {
4470         struct net_device *dev = dev_id;
4471         struct tg3 *tp = netdev_priv(dev);
4472         struct tg3_hw_status *sblk = tp->hw_status;
4473
4474         if ((sblk->status & SD_STATUS_UPDATED) ||
4475             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4476                 tg3_disable_ints(tp);
4477                 return IRQ_RETVAL(1);
4478         }
4479         return IRQ_RETVAL(0);
4480 }
4481
4482 static int tg3_init_hw(struct tg3 *, int);
4483 static int tg3_halt(struct tg3 *, int, int);
4484
4485 /* Restart hardware after configuration changes, self-test, etc.
4486  * Invoked with tp->lock held.
4487  */
4488 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4489         __releases(tp->lock)
4490         __acquires(tp->lock)
4491 {
4492         int err;
4493
4494         err = tg3_init_hw(tp, reset_phy);
4495         if (err) {
4496                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4497                        "aborting.\n", tp->dev->name);
4498                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4499                 tg3_full_unlock(tp);
4500                 del_timer_sync(&tp->timer);
4501                 tp->irq_sync = 0;
4502                 napi_enable(&tp->napi);
4503                 dev_close(tp->dev);
4504                 tg3_full_lock(tp, 0);
4505         }
4506         return err;
4507 }
4508
4509 #ifdef CONFIG_NET_POLL_CONTROLLER
4510 static void tg3_poll_controller(struct net_device *dev)
4511 {
4512         struct tg3 *tp = netdev_priv(dev);
4513
4514         tg3_interrupt(tp->pdev->irq, dev);
4515 }
4516 #endif
4517
4518 static void tg3_reset_task(struct work_struct *work)
4519 {
4520         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4521         int err;
4522         unsigned int restart_timer;
4523
4524         tg3_full_lock(tp, 0);
4525
4526         if (!netif_running(tp->dev)) {
4527                 tg3_full_unlock(tp);
4528                 return;
4529         }
4530
4531         tg3_full_unlock(tp);
4532
4533         tg3_phy_stop(tp);
4534
4535         tg3_netif_stop(tp);
4536
4537         tg3_full_lock(tp, 1);
4538
4539         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4540         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4541
4542         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4543                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4544                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4545                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4546                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4547         }
4548
4549         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4550         err = tg3_init_hw(tp, 1);
4551         if (err)
4552                 goto out;
4553
4554         tg3_netif_start(tp);
4555
4556         if (restart_timer)
4557                 mod_timer(&tp->timer, jiffies + 1);
4558
4559 out:
4560         tg3_full_unlock(tp);
4561
4562         if (!err)
4563                 tg3_phy_start(tp);
4564 }
4565
4566 static void tg3_dump_short_state(struct tg3 *tp)
4567 {
4568         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4569                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4570         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4571                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4572 }
4573
4574 static void tg3_tx_timeout(struct net_device *dev)
4575 {
4576         struct tg3 *tp = netdev_priv(dev);
4577
4578         if (netif_msg_tx_err(tp)) {
4579                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4580                        dev->name);
4581                 tg3_dump_short_state(tp);
4582         }
4583
4584         schedule_work(&tp->reset_task);
4585 }
4586
4587 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4588 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4589 {
4590         u32 base = (u32) mapping & 0xffffffff;
4591
4592         return ((base > 0xffffdcc0) &&
4593                 (base + len + 8 < base));
4594 }
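/* Worked example (illustrative): with base = 0xffffff00 and len = 0x200,
 * base + len + 8 = 0x100000108 truncates to 0x00000108 < base, so the
 * buffer straddles a 4GB boundary and the workaround path must be taken.
 * The base > 0xffffdcc0 pre-test cheaply rejects bases that are not
 * within roughly a maximum-sized frame of the boundary.
 */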
4595
4596 /* Test for DMA addresses > 40-bit */
4597 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4598                                           int len)
4599 {
4600 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4601         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4602                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4603         return 0;
4604 #else
4605         return 0;
4606 #endif
4607 }
4608
4609 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4610
4611 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4612 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4613                                        u32 last_plus_one, u32 *start,
4614                                        u32 base_flags, u32 mss)
4615 {
4616         struct sk_buff *new_skb;
4617         dma_addr_t new_addr = 0;
4618         u32 entry = *start;
4619         int i, ret = 0;
4620
4621         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
4622                 new_skb = skb_copy(skb, GFP_ATOMIC);
4623         } else {
4624                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4625
4626                 new_skb = skb_copy_expand(skb,
4627                                           skb_headroom(skb) + more_headroom,
4628                                           skb_tailroom(skb), GFP_ATOMIC);
4629         }
4630
4631         if (!new_skb) {
4632                 ret = -1;
4633         } else {
4634                 /* New SKB is guaranteed to be linear. */
4635                 entry = *start;
4636                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4637                                           PCI_DMA_TODEVICE);
4638                 /* Make sure new skb does not cross any 4G boundaries.
4639                  * Drop the packet if it does.
4640                  */
4641                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4642                         ret = -1;
4643                         dev_kfree_skb(new_skb);
4644                         new_skb = NULL;
4645                 } else {
4646                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4647                                     base_flags, 1 | (mss << 1));
4648                         *start = NEXT_TX(entry);
4649                 }
4650         }
4651
4652         /* Now clean up the sw ring entries. */
4653         i = 0;
4654         while (entry != last_plus_one) {
4655                 int len;
4656
4657                 if (i == 0)
4658                         len = skb_headlen(skb);
4659                 else
4660                         len = skb_shinfo(skb)->frags[i-1].size;
4661                 pci_unmap_single(tp->pdev,
4662                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4663                                  len, PCI_DMA_TODEVICE);
4664                 if (i == 0) {
4665                         tp->tx_buffers[entry].skb = new_skb;
4666                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4667                 } else {
4668                         tp->tx_buffers[entry].skb = NULL;
4669                 }
4670                 entry = NEXT_TX(entry);
4671                 i++;
4672         }
4673
4674         dev_kfree_skb(skb);
4675
4676         return ret;
4677 }
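/* Design note: the workaround linearizes the whole packet into one
 * freshly mapped skb, replacing the offending descriptor chain with a
 * single descriptor.  On the 5701 the copy is given extra headroom so
 * that its data can sit on a 4-byte boundary, which that revision
 * appears to require (cf. TG3_FLG3_5701_DMA_BUG).
 */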
4678
4679 static void tg3_set_txd(struct tg3 *tp, int entry,
4680                         dma_addr_t mapping, int len, u32 flags,
4681                         u32 mss_and_is_end)
4682 {
4683         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4684         int is_end = (mss_and_is_end & 0x1);
4685         u32 mss = (mss_and_is_end >> 1);
4686         u32 vlan_tag = 0;
4687
4688         if (is_end)
4689                 flags |= TXD_FLAG_END;
4690         if (flags & TXD_FLAG_VLAN) {
4691                 vlan_tag = flags >> 16;
4692                 flags &= 0xffff;
4693         }
4694         vlan_tag |= (mss << TXD_MSS_SHIFT);
4695
4696         txd->addr_hi = ((u64) mapping >> 32);
4697         txd->addr_lo = ((u64) mapping & 0xffffffff);
4698         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4699         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4700 }
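/* Usage sketch: callers pack "last fragment?" into bit 0 and the MSS
 * into the remaining bits of mss_and_is_end, e.g.
 *
 *	tg3_set_txd(tp, entry, mapping, len, base_flags,
 *		    (i == last) | (mss << 1));
 */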
4701
4702 /* hard_start_xmit for devices that don't have any bugs and
4703  * support TG3_FLG2_HW_TSO_2 only.
4704  */
4705 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4706 {
4707         struct tg3 *tp = netdev_priv(dev);
4708         dma_addr_t mapping;
4709         u32 len, entry, base_flags, mss;
4710
4711         len = skb_headlen(skb);
4712
4713         /* We are running in BH disabled context with netif_tx_lock
4714          * and TX reclaim runs via tp->napi.poll inside of a software
4715          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4716          * no IRQ context deadlocks to worry about either.  Rejoice!
4717          */
4718         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4719                 if (!netif_queue_stopped(dev)) {
4720                         netif_stop_queue(dev);
4721
4722                         /* This is a hard error, log it. */
4723                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4724                                "queue awake!\n", dev->name);
4725                 }
4726                 return NETDEV_TX_BUSY;
4727         }
4728
4729         entry = tp->tx_prod;
4730         base_flags = 0;
4731         mss = 0;
4732         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4733                 int tcp_opt_len, ip_tcp_len;
4734
4735                 if (skb_header_cloned(skb) &&
4736                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4737                         dev_kfree_skb(skb);
4738                         goto out_unlock;
4739                 }
4740
4741                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4742                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4743                 else {
4744                         struct iphdr *iph = ip_hdr(skb);
4745
4746                         tcp_opt_len = tcp_optlen(skb);
4747                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4748
4749                         iph->check = 0;
4750                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4751                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4752                 }
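                /* For these HW_TSO_2 parts the TCP/IP header length is
                 * folded into the upper bits of the mss word (the << 9
                 * shifts above); the exact bit layout is chip-specific,
                 * but the chip needs the length to replicate headers
                 * per segment.
                 */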
4753
4754                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4755                                TXD_FLAG_CPU_POST_DMA);
4756
4757                 tcp_hdr(skb)->check = 0;
4758         } else if (skb->ip_summed == CHECKSUM_PARTIAL)
4761                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4762 #if TG3_VLAN_TAG_USED
4763         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4764                 base_flags |= (TXD_FLAG_VLAN |
4765                                (vlan_tx_tag_get(skb) << 16));
4766 #endif
4767
4768         /* Queue skb data, a.k.a. the main skb fragment. */
4769         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4770
4771         tp->tx_buffers[entry].skb = skb;
4772         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4773
4774         tg3_set_txd(tp, entry, mapping, len, base_flags,
4775                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4776
4777         entry = NEXT_TX(entry);
4778
4779         /* Now loop through additional data fragments, and queue them. */
4780         if (skb_shinfo(skb)->nr_frags > 0) {
4781                 unsigned int i, last;
4782
4783                 last = skb_shinfo(skb)->nr_frags - 1;
4784                 for (i = 0; i <= last; i++) {
4785                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4786
4787                         len = frag->size;
4788                         mapping = pci_map_page(tp->pdev,
4789                                                frag->page,
4790                                                frag->page_offset,
4791                                                len, PCI_DMA_TODEVICE);
4792
4793                         tp->tx_buffers[entry].skb = NULL;
4794                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4795
4796                         tg3_set_txd(tp, entry, mapping, len,
4797                                     base_flags, (i == last) | (mss << 1));
4798
4799                         entry = NEXT_TX(entry);
4800                 }
4801         }
4802
4803         /* Packets are ready, update Tx producer idx local and on card. */
4804         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4805
4806         tp->tx_prod = entry;
4807         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4808                 netif_stop_queue(dev);
4809                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4810                         netif_wake_queue(tp->dev);
4811         }
4812
4813 out_unlock:
4814         mmiowb();
4815
4816         dev->trans_start = jiffies;
4817
4818         return NETDEV_TX_OK;
4819 }
4820
4821 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4822
4823 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4824  * TSO header is greater than 80 bytes.
4825  */
4826 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4827 {
4828         struct sk_buff *segs, *nskb;
4829
4830         /* Estimate the worst case: ~3 TX descriptors per GSO segment. */
4831         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4832                 netif_stop_queue(tp->dev);
4833                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4834                         return NETDEV_TX_BUSY;
4835
4836                 netif_wake_queue(tp->dev);
4837         }
4838
4839         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4840         if (IS_ERR(segs))
4841                 goto tg3_tso_bug_end;
4842
4843         do {
4844                 nskb = segs;
4845                 segs = segs->next;
4846                 nskb->next = NULL;
4847                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4848         } while (segs);
4849
4850 tg3_tso_bug_end:
4851         dev_kfree_skb(skb);
4852
4853         return NETDEV_TX_OK;
4854 }
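/* Design note: masking NETIF_F_TSO out of the feature set handed to
 * skb_gso_segment() forces full software segmentation, so every nskb
 * carries an ordinary-sized header and can be queued through
 * tg3_start_xmit_dma_bug() without re-triggering the erratum.
 */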
4855
4856 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4857  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4858  */
4859 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4860 {
4861         struct tg3 *tp = netdev_priv(dev);
4862         dma_addr_t mapping;
4863         u32 len, entry, base_flags, mss;
4864         int would_hit_hwbug;
4865
4866         len = skb_headlen(skb);
4867
4868         /* We are running in BH disabled context with netif_tx_lock
4869          * and TX reclaim runs via tp->napi.poll inside of a software
4870          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4871          * no IRQ context deadlocks to worry about either.  Rejoice!
4872          */
4873         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4874                 if (!netif_queue_stopped(dev)) {
4875                         netif_stop_queue(dev);
4876
4877                         /* This is a hard error, log it. */
4878                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4879                                "queue awake!\n", dev->name);
4880                 }
4881                 return NETDEV_TX_BUSY;
4882         }
4883
4884         entry = tp->tx_prod;
4885         base_flags = 0;
4886         if (skb->ip_summed == CHECKSUM_PARTIAL)
4887                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4888         mss = 0;
4889         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4890                 struct iphdr *iph;
4891                 int tcp_opt_len, ip_tcp_len, hdr_len;
4892
4893                 if (skb_header_cloned(skb) &&
4894                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4895                         dev_kfree_skb(skb);
4896                         goto out_unlock;
4897                 }
4898
4899                 tcp_opt_len = tcp_optlen(skb);
4900                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4901
4902                 hdr_len = ip_tcp_len + tcp_opt_len;
4903                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4904                     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4905                         return tg3_tso_bug(tp, skb);
4906
4907                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4908                                TXD_FLAG_CPU_POST_DMA);
4909
4910                 iph = ip_hdr(skb);
4911                 iph->check = 0;
4912                 iph->tot_len = htons(mss + hdr_len);
4913                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4914                         tcp_hdr(skb)->check = 0;
4915                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4916                 } else
4917                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4918                                                                  iph->daddr, 0,
4919                                                                  IPPROTO_TCP,
4920                                                                  0);
4921
4922                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4923                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4924                         if (tcp_opt_len || iph->ihl > 5) {
4925                                 int tsflags;
4926
4927                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4928                                 mss |= (tsflags << 11);
4929                         }
4930                 } else {
4931                         if (tcp_opt_len || iph->ihl > 5) {
4932                                 int tsflags;
4933
4934                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4935                                 base_flags |= tsflags << 12;
4936                         }
4937                 }
4938         }
4939 #if TG3_VLAN_TAG_USED
4940         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4941                 base_flags |= (TXD_FLAG_VLAN |
4942                                (vlan_tx_tag_get(skb) << 16));
4943 #endif
4944
4945         /* Queue skb data, a.k.a. the main skb fragment. */
4946         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4947
4948         tp->tx_buffers[entry].skb = skb;
4949         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4950
4951         would_hit_hwbug = 0;
4952
4953         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4954                 would_hit_hwbug = 1;
4955         else if (tg3_4g_overflow_test(mapping, len))
4956                 would_hit_hwbug = 1;
4957
4958         tg3_set_txd(tp, entry, mapping, len, base_flags,
4959                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4960
4961         entry = NEXT_TX(entry);
4962
4963         /* Now loop through additional data fragments, and queue them. */
4964         if (skb_shinfo(skb)->nr_frags > 0) {
4965                 unsigned int i, last;
4966
4967                 last = skb_shinfo(skb)->nr_frags - 1;
4968                 for (i = 0; i <= last; i++) {
4969                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970
4971                         len = frag->size;
4972                         mapping = pci_map_page(tp->pdev,
4973                                                frag->page,
4974                                                frag->page_offset,
4975                                                len, PCI_DMA_TODEVICE);
4976
4977                         tp->tx_buffers[entry].skb = NULL;
4978                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4979
4980                         if (tg3_4g_overflow_test(mapping, len))
4981                                 would_hit_hwbug = 1;
4982
4983                         if (tg3_40bit_overflow_test(tp, mapping, len))
4984                                 would_hit_hwbug = 1;
4985
4986                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4987                                 tg3_set_txd(tp, entry, mapping, len,
4988                                             base_flags, (i == last)|(mss << 1));
4989                         else
4990                                 tg3_set_txd(tp, entry, mapping, len,
4991                                             base_flags, (i == last));
4992
4993                         entry = NEXT_TX(entry);
4994                 }
4995         }
4996
4997         if (would_hit_hwbug) {
4998                 u32 last_plus_one = entry;
4999                 u32 start;
5000
5001                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5002                 start &= (TG3_TX_RING_SIZE - 1);
5003
5004                 /* If the workaround fails due to memory/mapping
5005                  * failure, silently drop this packet.
5006                  */
5007                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5008                                                 &start, base_flags, mss))
5009                         goto out_unlock;
5010
5011                 entry = start;
5012         }
5013
5014         /* Packets are ready, update Tx producer idx local and on card. */
5015         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5016
5017         tp->tx_prod = entry;
5018         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5019                 netif_stop_queue(dev);
5020                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5021                         netif_wake_queue(tp->dev);
5022         }
5023
5024 out_unlock:
5025         mmiowb();
5026
5027         dev->trans_start = jiffies;
5028
5029         return NETDEV_TX_OK;
5030 }
5031
5032 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5033                                int new_mtu)
5034 {
5035         dev->mtu = new_mtu;
5036
5037         if (new_mtu > ETH_DATA_LEN) {
5038                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5039                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5040                         ethtool_op_set_tso(dev, 0);
5041                 } else {
5042                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5043                 }
5044         } else {
5045                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5046                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5047                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5048         }
5049 }
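/* Note: 5780-class parts apparently cannot run TSO and jumbo frames at
 * the same time, so TSO capability is dropped before enabling jumbo and
 * restored when returning to a standard MTU.
 */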
5050
5051 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5052 {
5053         struct tg3 *tp = netdev_priv(dev);
5054         int err;
5055
5056         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5057                 return -EINVAL;
5058
5059         if (!netif_running(dev)) {
5060                 /* We'll just pick up the new MTU when the
5061                  * device is next brought up.
5062                  */
5063                 tg3_set_mtu(dev, tp, new_mtu);
5064                 return 0;
5065         }
5066
5067         tg3_phy_stop(tp);
5068
5069         tg3_netif_stop(tp);
5070
5071         tg3_full_lock(tp, 1);
5072
5073         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5074
5075         tg3_set_mtu(dev, tp, new_mtu);
5076
5077         err = tg3_restart_hw(tp, 0);
5078
5079         if (!err)
5080                 tg3_netif_start(tp);
5081
5082         tg3_full_unlock(tp);
5083
5084         if (!err)
5085                 tg3_phy_start(tp);
5086
5087         return err;
5088 }
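/* The sequence above is the canonical tg3 reconfiguration pattern
 * (sketch, with error handling elided):
 *
 *	tg3_phy_stop(tp);
 *	tg3_netif_stop(tp);
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... apply the new settings ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 *
 * Other reconfiguration paths in this driver repeat the same dance.
 */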
5089
5090 /* Free up pending packets in all rx/tx rings.
5091  *
5092  * The chip has been shut down and the driver detached from
5093  * the networking core, so no interrupts or new tx packets will
5094  * end up in the driver.  tp->{tx,}lock is not held and we are not
5095  * in an interrupt context and thus may sleep.
5096  */
5097 static void tg3_free_rings(struct tg3 *tp)
5098 {
5099         struct ring_info *rxp;
5100         int i;
5101
5102         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5103                 rxp = &tp->rx_std_buffers[i];
5104
5105                 if (rxp->skb == NULL)
5106                         continue;
5107                 pci_unmap_single(tp->pdev,
5108                                  pci_unmap_addr(rxp, mapping),
5109                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5110                                  PCI_DMA_FROMDEVICE);
5111                 dev_kfree_skb_any(rxp->skb);
5112                 rxp->skb = NULL;
5113         }
5114
5115         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5116                 rxp = &tp->rx_jumbo_buffers[i];
5117
5118                 if (rxp->skb == NULL)
5119                         continue;
5120                 pci_unmap_single(tp->pdev,
5121                                  pci_unmap_addr(rxp, mapping),
5122                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5123                                  PCI_DMA_FROMDEVICE);
5124                 dev_kfree_skb_any(rxp->skb);
5125                 rxp->skb = NULL;
5126         }
5127
5128         for (i = 0; i < TG3_TX_RING_SIZE; ) {
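                /* i is advanced inside the loop body: once past the head
                 * descriptor and once per fragment.
                 */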
5129                 struct tx_ring_info *txp;
5130                 struct sk_buff *skb;
5131                 int j;
5132
5133                 txp = &tp->tx_buffers[i];
5134                 skb = txp->skb;
5135
5136                 if (skb == NULL) {
5137                         i++;
5138                         continue;
5139                 }
5140
5141                 pci_unmap_single(tp->pdev,
5142                                  pci_unmap_addr(txp, mapping),
5143                                  skb_headlen(skb),
5144                                  PCI_DMA_TODEVICE);
5145                 txp->skb = NULL;
5146
5147                 i++;
5148
5149                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
5150                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
5151                         pci_unmap_page(tp->pdev,
5152                                        pci_unmap_addr(txp, mapping),
5153                                        skb_shinfo(skb)->frags[j].size,
5154                                        PCI_DMA_TODEVICE);
5155                         i++;
5156                 }
5157
5158                 dev_kfree_skb_any(skb);
5159         }
5160 }
5161
5162 /* Initialize tx/rx rings for packet processing.
5163  *
5164  * The chip has been shut down and the driver detached from
5165  * the networking core, so no interrupts or new tx packets will
5166  * end up in the driver.  tp->{tx,}lock are held and thus
5167  * we may not sleep.
5168  */
5169 static int tg3_init_rings(struct tg3 *tp)
5170 {
5171         u32 i;
5172
5173         /* Free up all the SKBs. */
5174         tg3_free_rings(tp);
5175
5176         /* Zero out all descriptors. */
5177         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5178         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5179         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5180         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5181
5182         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5183         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5184             (tp->dev->mtu > ETH_DATA_LEN))
5185                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5186
5187         /* Initialize invariants of the rings; we only set this
5188          * stuff once.  This works because the card does not
5189          * write into the rx buffer posting rings.
5190          */
5191         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5192                 struct tg3_rx_buffer_desc *rxd;
5193
5194                 rxd = &tp->rx_std[i];
5195                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5196                         << RXD_LEN_SHIFT;
5197                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5198                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5199                                (i << RXD_OPAQUE_INDEX_SHIFT));
5200         }
5201
5202         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5203                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5204                         struct tg3_rx_buffer_desc *rxd;
5205
5206                         rxd = &tp->rx_jumbo[i];
5207                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5208                                 << RXD_LEN_SHIFT;
5209                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5210                                 RXD_FLAG_JUMBO;
5211                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5212                                (i << RXD_OPAQUE_INDEX_SHIFT));
5213                 }
5214         }
5215
5216         /* Now allocate fresh SKBs for each rx ring. */
5217         for (i = 0; i < tp->rx_pending; i++) {
5218                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5219                         printk(KERN_WARNING PFX
5220                                "%s: Using a smaller RX standard ring, "
5221                                "only %d out of %d buffers were allocated "
5222                                "successfully.\n",
5223                                tp->dev->name, i, tp->rx_pending);
5224                         if (i == 0)
5225                                 return -ENOMEM;
5226                         tp->rx_pending = i;
5227                         break;
5228                 }
5229         }
5230
5231         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5232                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5233                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5234                                              -1, i) < 0) {
5235                                 printk(KERN_WARNING PFX
5236                                        "%s: Using a smaller RX jumbo ring, "
5237                                        "only %d out of %d buffers were "
5238                                        "allocated successfully.\n",
5239                                        tp->dev->name, i, tp->rx_jumbo_pending);
5240                                 if (i == 0) {
5241                                         tg3_free_rings(tp);
5242                                         return -ENOMEM;
5243                                 }
5244                                 tp->rx_jumbo_pending = i;
5245                                 break;
5246                         }
5247                 }
5248         }
5249         return 0;
5250 }
5251
5252 /*
5253  * Must not be invoked with interrupt sources disabled and
5254  * the hardware shut down.
5255  */
5256 static void tg3_free_consistent(struct tg3 *tp)
5257 {
5258         kfree(tp->rx_std_buffers);
5259         tp->rx_std_buffers = NULL;
5260         if (tp->rx_std) {
5261                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5262                                     tp->rx_std, tp->rx_std_mapping);
5263                 tp->rx_std = NULL;
5264         }
5265         if (tp->rx_jumbo) {
5266                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5267                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5268                 tp->rx_jumbo = NULL;
5269         }
5270         if (tp->rx_rcb) {
5271                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5272                                     tp->rx_rcb, tp->rx_rcb_mapping);
5273                 tp->rx_rcb = NULL;
5274         }
5275         if (tp->tx_ring) {
5276                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5277                         tp->tx_ring, tp->tx_desc_mapping);
5278                 tp->tx_ring = NULL;
5279         }
5280         if (tp->hw_status) {
5281                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5282                                     tp->hw_status, tp->status_mapping);
5283                 tp->hw_status = NULL;
5284         }
5285         if (tp->hw_stats) {
5286                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5287                                     tp->hw_stats, tp->stats_mapping);
5288                 tp->hw_stats = NULL;
5289         }
5290 }
5291
5292 /*
5293  * Must not be invoked with interrupt sources disabled and
5294  * the hardware shut down.  Can sleep.
5295  */
5296 static int tg3_alloc_consistent(struct tg3 *tp)
5297 {
5298         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5299                                       (TG3_RX_RING_SIZE +
5300                                        TG3_RX_JUMBO_RING_SIZE)) +
5301                                      (sizeof(struct tx_ring_info) *
5302                                       TG3_TX_RING_SIZE),
5303                                      GFP_KERNEL);
5304         if (!tp->rx_std_buffers)
5305                 return -ENOMEM;
5306
5307         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5308         tp->tx_buffers = (struct tx_ring_info *)
5309                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5310
5311         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5312                                           &tp->rx_std_mapping);
5313         if (!tp->rx_std)
5314                 goto err_out;
5315
5316         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5317                                             &tp->rx_jumbo_mapping);
5318
5319         if (!tp->rx_jumbo)
5320                 goto err_out;
5321
5322         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5323                                           &tp->rx_rcb_mapping);
5324         if (!tp->rx_rcb)
5325                 goto err_out;
5326
5327         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5328                                            &tp->tx_desc_mapping);
5329         if (!tp->tx_ring)
5330                 goto err_out;
5331
5332         tp->hw_status = pci_alloc_consistent(tp->pdev,
5333                                              TG3_HW_STATUS_SIZE,
5334                                              &tp->status_mapping);
5335         if (!tp->hw_status)
5336                 goto err_out;
5337
5338         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5339                                             sizeof(struct tg3_hw_stats),
5340                                             &tp->stats_mapping);
5341         if (!tp->hw_stats)
5342                 goto err_out;
5343
5344         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5345         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5346
5347         return 0;
5348
5349 err_out:
5350         tg3_free_consistent(tp);
5351         return -ENOMEM;
5352 }
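/* Note: a single kzalloc() above covers all three software ring-info
 * arrays; the jumbo and tx arrays are carved out of the tail of the
 * std allocation, which is why tg3_free_consistent() frees only
 * rx_std_buffers.
 */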
5353
5354 #define MAX_WAIT_CNT 1000
5355
5356 /* To stop a block, clear the enable bit and poll till it
5357  * clears.  tp->lock is held.
5358  */
5359 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5360 {
5361         unsigned int i;
5362         u32 val;
5363
5364         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5365                 switch (ofs) {
5366                 case RCVLSC_MODE:
5367                 case DMAC_MODE:
5368                 case MBFREE_MODE:
5369                 case BUFMGR_MODE:
5370                 case MEMARB_MODE:
5371                         /* We can't enable/disable these bits of the
5372                          * 5705/5750, just say success.
5373                          */
5374                         return 0;
5375
5376                 default:
5377                         break;
5378                 }
5379         }
5380
5381         val = tr32(ofs);
5382         val &= ~enable_bit;
5383         tw32_f(ofs, val);
5384
5385         for (i = 0; i < MAX_WAIT_CNT; i++) {
5386                 udelay(100);
5387                 val = tr32(ofs);
5388                 if ((val & enable_bit) == 0)
5389                         break;
5390         }
5391
5392         if (i == MAX_WAIT_CNT && !silent) {
5393                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5394                        "ofs=%lx enable_bit=%x\n",
5395                        ofs, enable_bit);
5396                 return -ENODEV;
5397         }
5398
5399         return 0;
5400 }
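/* Usage sketch: shutting down an engine is a series of these calls,
 * OR-ing the results so a single failure is not lost, e.g.
 *
 *	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 *	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
 *
 * tg3_abort_hw() below does exactly this for every RX and TX block.
 */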
5401
5402 /* tp->lock is held. */
5403 static int tg3_abort_hw(struct tg3 *tp, int silent)
5404 {
5405         int i, err;
5406
5407         tg3_disable_ints(tp);
5408
5409         tp->rx_mode &= ~RX_MODE_ENABLE;
5410         tw32_f(MAC_RX_MODE, tp->rx_mode);
5411         udelay(10);
5412
5413         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5414         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5415         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5416         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5417         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5418         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5419
5420         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5421         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5422         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5423         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5424         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5425         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5426         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5427
5428         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5429         tw32_f(MAC_MODE, tp->mac_mode);
5430         udelay(40);
5431
5432         tp->tx_mode &= ~TX_MODE_ENABLE;
5433         tw32_f(MAC_TX_MODE, tp->tx_mode);
5434
5435         for (i = 0; i < MAX_WAIT_CNT; i++) {
5436                 udelay(100);
5437                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5438                         break;
5439         }
5440         if (i >= MAX_WAIT_CNT) {
5441                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5442                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5443                        tp->dev->name, tr32(MAC_TX_MODE));
5444                 err |= -ENODEV;
5445         }
5446
5447         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5448         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5449         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5450
5451         tw32(FTQ_RESET, 0xffffffff);
5452         tw32(FTQ_RESET, 0x00000000);
5453
5454         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5455         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5456
5457         if (tp->hw_status)
5458                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5459         if (tp->hw_stats)
5460                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5461
5462         return err;
5463 }
5464
5465 /* tp->lock is held. */
5466 static int tg3_nvram_lock(struct tg3 *tp)
5467 {
5468         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5469                 int i;
5470
5471                 if (tp->nvram_lock_cnt == 0) {
5472                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5473                         for (i = 0; i < 8000; i++) {
5474                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5475                                         break;
5476                                 udelay(20);
5477                         }
5478                         if (i == 8000) {
5479                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5480                                 return -ENODEV;
5481                         }
5482                 }
5483                 tp->nvram_lock_cnt++;
5484         }
5485         return 0;
5486 }
5487
5488 /* tp->lock is held. */
5489 static void tg3_nvram_unlock(struct tg3 *tp)
5490 {
5491         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5492                 if (tp->nvram_lock_cnt > 0)
5493                         tp->nvram_lock_cnt--;
5494                 if (tp->nvram_lock_cnt == 0)
5495                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5496         }
5497 }
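/* Note: nvram_lock_cnt makes the arbitration lock effectively recursive
 * for the holder of tp->lock: only the outermost tg3_nvram_lock() touches
 * NVRAM_SWARB, and only the matching outermost unlock releases it.
 */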
5498
5499 /* tp->lock is held. */
5500 static void tg3_enable_nvram_access(struct tg3 *tp)
5501 {
5502         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5503             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5504                 u32 nvaccess = tr32(NVRAM_ACCESS);
5505
5506                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5507         }
5508 }
5509
5510 /* tp->lock is held. */
5511 static void tg3_disable_nvram_access(struct tg3 *tp)
5512 {
5513         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5514             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5515                 u32 nvaccess = tr32(NVRAM_ACCESS);
5516
5517                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5518         }
5519 }
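
/* Editor's note: on 5750-and-later parts without protected NVRAM,
 * register-level NVRAM access is additionally gated by ACCESS_ENABLE,
 * so the conventional bracket nests the access gate inside the
 * arbitration lock.  Hedged sketch of the ordering (the actual NVRAM
 * operation is left as a placeholder):
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static int tg3_nvram_bracket_example(struct tg3 *tp)
{
	int ret;

	ret = tg3_nvram_lock(tp);	/* 1. win software arbitration */
	if (ret)
		return ret;
	tg3_enable_nvram_access(tp);	/* 2. open the access gate */

	/* 3. ... issue NVRAM_CMD/NVRAM_ADDR operations here ... */

	tg3_disable_nvram_access(tp);	/* 4. close the gate */
	tg3_nvram_unlock(tp);		/* 5. release arbitration */
	return 0;
}
#endif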
5520
5521 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5522 {
5523         int i;
5524         u32 apedata;
5525
5526         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5527         if (apedata != APE_SEG_SIG_MAGIC)
5528                 return;
5529
5530         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5531         if (!(apedata & APE_FW_STATUS_READY))
5532                 return;
5533
5534         /* Wait up to 1 millisecond for the APE to service the previous event. */
5535         for (i = 0; i < 10; i++) {
5536                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5537                         return;
5538
5539                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5540
5541                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5542                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5543                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5544
5545                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5546
5547                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5548                         break;
5549
5550                 udelay(100);
5551         }
5552
5553         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5554                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5555 }
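
/* Editor's note on tg3_ape_send_event(): the loop is a bounded
 * test-and-claim handshake.  Under TG3_APE_LOCK_MEM it checks whether
 * the previous event is still PENDING; if not, it publishes the new
 * event word with APE_EVENT_STATUS_EVENT_PENDING set and leaves the
 * loop, otherwise it retries for at most 10 * 100us = 1ms.  The final
 * APE_EVENT_1 write to TG3_APE_EVENT is the doorbell telling the APE
 * firmware to look at the event word just claimed.
 */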
5556
5557 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5558 {
5559         u32 event;
5560         u32 apedata;
5561
5562         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5563                 return;
5564
5565         switch (kind) {
5566         case RESET_KIND_INIT:
5567                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5568                                 APE_HOST_SEG_SIG_MAGIC);
5569                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5570                                 APE_HOST_SEG_LEN_MAGIC);
5571                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5572                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5573                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5574                                 APE_HOST_DRIVER_ID_MAGIC);
5575                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5576                                 APE_HOST_BEHAV_NO_PHYLOCK);
5577
5578                 event = APE_EVENT_STATUS_STATE_START;
5579                 break;
5580         case RESET_KIND_SHUTDOWN:
5581                 event = APE_EVENT_STATUS_STATE_UNLOAD;
5582                 break;
5583         case RESET_KIND_SUSPEND:
5584                 event = APE_EVENT_STATUS_STATE_SUSPEND;
5585                 break;
5586         default:
5587                 return;
5588         }
5589
5590         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5591
5592         tg3_ape_send_event(tp, event);
5593 }
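
/* Editor's note: for quick reference, the mapping implemented above is
 *   RESET_KIND_INIT     -> APE_EVENT_STATUS_STATE_START (plus the host
 *                          segment signature/length, init count, driver
 *                          id and behavior scratchpad writes)
 *   RESET_KIND_SHUTDOWN -> APE_EVENT_STATUS_STATE_UNLOAD
 *   RESET_KIND_SUSPEND  -> APE_EVENT_STATUS_STATE_SUSPEND
 * and every event is tagged DRIVER_EVNT | STATE_CHNGE before posting.
 */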
5594
5595 /* tp->lock is held. */
5596 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5597 {
5598         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5599                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5600
5601         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5602                 switch (kind) {
5603                 case RESET_KIND_INIT:
5604                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5605                                       DRV_STATE_START);
5606                         break;
5607
5608                 case RESET_KIND_SHUTDOWN:
5609                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5610                                       DRV_STATE_UNLOAD);
5611                         break;
5612
5613                 case RESET_KIND_SUSPEND:
5614                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5615                                       DRV_STATE_SUSPEND);
5616                         break;
5617
5618                 default:
5619                         break;
5620                 }
5621         }
5622
5623         if (kind == RESET_KIND_INIT ||
5624             kind == RESET_KIND_SUSPEND)
5625                 tg3_ape_driver_state_change(tp, kind);
5626 }
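
/* Editor's note: callers elsewhere in this driver bracket a chip reset
 * with the signature writers in roughly the shape sketched below (a
 * hedged composite, not a copy of any one caller):
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static void tg3_reset_bracket_example(struct tg3 *tp, int kind)
{
	tg3_write_sig_pre_reset(tp, kind);	/* announce: reset imminent */
	/* ... quiesce DMA/MAC blocks and reset the chip here ... */
	tg3_write_sig_legacy(tp, kind);		/* old-style ASF mailbox */
	tg3_write_sig_post_reset(tp, kind);	/* announce: *_DONE */
}
#endif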
5627
5628 /* tp->lock is held. */
5629 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5630 {
5631         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5632                 switch (kind) {
5633                 case RESET_KIND_INIT:
5634                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5635                                       DRV_STATE_START_DONE);
5636                         break;
5637
5638                 case RESET_KIND_SHUTDOWN:
5639                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5640                                       DRV_STATE_UNLOAD_DONE);
5641                         break;
5642
5643                 default:
5644                         break;
5645                 }
5646         }
5647
5648         if (kind == RESET_KIND_SHUTDOWN)
5649                 tg3_ape_driver_state_change(tp, kind);
5650 }
5651
5652 /* tp->lock is held. */
5653 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5654 {
5655         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5656                 switch (kind) {
5657                 case RESET_KIND_INIT:
5658                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5659                                       DRV_STATE_START);
5660                         break;
5661
5662                 case RESET_KIND_SHUTDOWN:
5663                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5664                                       DRV_STATE_UNLOAD);
5665                         break;
5666
5667                 case RESET_KIND_SUSPEND:
5668                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5669                                       DRV_STATE_SUSPEND);
5670                         break;
5671
5672                 default:
5673                         break;
5674                 }
5675         }
5676 }
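
/* Editor's note: the three signature writers differ mainly in gating
 * flag and payload.  tg3_write_sig_pre_reset() and ..._post_reset()
 * key off TG3_FLG2_ASF_NEW_HANDSHAKE, writing DRV_STATE_* before the
 * reset and the matching DRV_STATE_*_DONE after it; this legacy
 * variant keys off TG3_FLAG_ENABLE_ASF and writes the same non-DONE
 * DRV_STATE_* values for older firmware.
 */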
5677
5678 static int tg3_poll_fw(struct tg3 *tp)
5679 {
5680         int i;
5681         u32 val;
5682
5683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5684                 /* Wait up to 20ms for init done. */
5685                 for (i = 0; i < 200; i++) {
5686                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5687                                 return 0;
5688                         udelay(100);
5689                 }
5690                 return -ENODEV;
5691         }
5692
5693         /* Wait for firmware initialization to complete. */
5694         for (i = 0; i < 100000; i++) {
5695                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5696                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5697                         break;
5698                 udelay(10);
5699         }
5700
5701         /* The chip might not be fitted with firmware.  Some Sun onboard
5702          * parts are configured that way.  So don't treat the above loop
5703          * timing out as an error, but do report the lack of running
5704          * firmware once.
5705          */
5706         if (i >= 100000 &&
5707             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5708                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5709
5710                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5711                        tp->dev->name);
5712         }
5713
5714         return 0;
5715 }
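
/* Editor's note: the handshake tg3_poll_fw() waits on is "driver
 * writes NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset, firmware
 * writes back its bitwise complement when boot completes", hence the
 * val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 test.  Worst-case wait is
 * 100000 * 10us = 1s, or 200 * 100us = 20ms on the 5906 VCPU path.
 */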
5716
5717 /* Save PCI command register before chip reset */
5718 static void tg3_save_pci_state(struct tg3 *tp)
5719 {
5720         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5721 }
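
/* Editor's note: the save/restore pair is deliberately asymmetric.
 * Only PCI_COMMAND must survive the reset verbatim; everything
 * tg3_restore_pci_state() rewrites below is rebuilt from values cached
 * at probe time (misc_host_ctrl, cacheline size, latency timer, ...).
 * Hedged sketch of the intended bracket around a chip reset:
 */
#if 0	/* illustrative sketch, intentionally not compiled */
static void tg3_pci_bracket_example(struct tg3 *tp)
{
	tg3_save_pci_state(tp);		/* snapshot PCI_COMMAND */
	/* ... the chip reset clobbers PCI config space here ... */
	tg3_restore_pci_state(tp);	/* replay command + cached tunables */
}
#endif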
5722
5723 /* Restore PCI state after chip reset */
5724 static void tg3_restore_pci_state(struct tg3 *tp)
5725 {
5726         u32 val;
5727
5728         /* Re-enable indirect register accesses. */
5729         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5730                                tp->misc_host_ctrl);
5731
5732         /* Set MAX PCI retry to zero. */
5733         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5734         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5735             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5736                 val |= PCISTATE_RETRY_SAME_DMA;
5737         /* Allow reads and writes to the APE register and memory space. */
5738         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5739                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5740                        PCISTATE_ALLOW_APE_SHMEM_WR;
5741         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5742
5743         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5744
5745         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5746                 pcie_set_readrq(tp->pdev, 4096);
5747         else {
5748                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5749                                       tp->pci_cacheline_sz);
5750                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5751                                       tp->pci_lat_timer);
5752         }
5753
5754         /* Make sure PCI-X relaxed ordering bit is clear. */
5755         if (tp->pcix_cap) {
5756                 u16 pcix_cmd;
5757
5758                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,