/* [TG3]: Prescaler fix
 * [linux-2.6.git] / drivers / net / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
/* Use VLAN tag support only when the kernel has 802.1Q built in or
 * available as a module.
 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.85"
#define DRV_MODULE_RELDATE	"October 18, 2007"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap: all categories except per-packet noise. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Advance a TX ring index with wraparound (ring size is a power of 2). */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame plus alignment offset and slack bytes. */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)		((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
134
/* One-line banner printed when the driver is first probed. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: bitmap of NETIF_MSG_* categories to log. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs of every Tigon3 variant this driver claims. */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ETHTOOL_GSTATS string names.  The entry count is TG3_NUM_STATS,
 * which is derived from struct tg3_ethtool_stats, so the order here
 * must stay in sync with that structure's fields.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* Names of the TG3_NUM_TEST ethtool self-tests, in execution order. */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a device register indirectly via PCI config space: point the
 * register-base window at @off, then push @val through the data port.
 * indirect_lock serializes the two-step config-space sequence against
 * other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* Write a register and immediately read it back so the posted PCI
 * write is flushed out to the device before we proceed.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Read a device register indirectly via PCI config space, using the
 * same base/data window pair as tg3_write_indirect_reg32() under
 * indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register when register access must go through PCI
 * config space.  Two high-frequency mailboxes have dedicated config
 * space aliases and are written directly; everything else goes through
 * the indirect window (mailboxes sit at offset 0x5600 in that map).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index: dedicated config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX ring producer index: dedicated config-space alias. */
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	/* Mailbox registers are mapped at 0x5600 in the indirect map. */
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register through the PCI config-space window
 * (mailboxes are mapped at offset 0x5600 in the indirect map).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
/* Write a register and guarantee @usec_wait microseconds of settle
 * time.  See the comment above for why certain registers need this.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read back to flush the post */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Write a mailbox register, then read it back to flush the posted
 * write - unless the MBOX_WRITE_REORDER or ICH-workaround flags
 * indicate the read-back must be skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug get the
 * value written twice; chips flagged for mailbox write reordering get
 * a read-back to force ordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* double write: hw bug workaround */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush to enforce ordering */
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
/* Convenience accessors: route register and mailbox I/O through the
 * per-chip method pointers installed in struct tg3.  The *_f variants
 * flush the write; tw32_wait_f additionally waits @us microseconds.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
463
/* Write one 32-bit word into NIC on-chip SRAM at @off through the
 * memory window, either via PCI config space (SRAM_USE_CONFIG) or via
 * the memory-mapped window registers.  Serialized by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: skip writes into the statistics block region of SRAM. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read one 32-bit word of NIC on-chip SRAM at @off into *@val using
 * the memory window (config-space or MMIO variant, like tg3_write_mem).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the statistics block region reads back as zero. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Stop the chip from raising interrupts: mask the PCI interrupt line
 * and write 1 to the interrupt mailbox (the "disable" value - see
 * tg3_write_indirect_mbox).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force delivery of an interrupt if status-block work may be pending:
 * non-tagged chips with an updated status block get the GRC SETINT
 * bit; otherwise the coalescing engine is kicked to fire one now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: unmask the PCI interrupt line, write the
 * last seen status tag to the interrupt mailbox, and force an
 * interrupt if work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();	/* publish irq_sync = 0 before interrupts can fire */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI chips get the mailbox written a second time. */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
635 /* tg3_restart_ints
636  *  similar to tg3_enable_ints, but it accurately determines whether there
637  *  is new work pending and can return without flushing the PIO write
638  *  which reenables interrupts
639  */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: stop NAPI polling and disable TX queuing. */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path: wake TX, resume NAPI, re-enable interrupts. */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	/* Mark the status block updated so pending work gets noticed. */
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Reprogram the core clock control register.  No-op on CPMU-equipped
 * and 5780-class chips.  Each write uses tw32_wait_f with a 40 usec
 * settle delay because clock frequency changes are in flight.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	/* Keep only the CLKRUN bits and the low 5-bit field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition away from the 44 MHz core clock in two
		 * steps, dropping the 44MHZ_CORE bit first.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg over the MDIO (MI) interface into *@val.
 * Temporarily suspends MI auto-polling if active, busy-waits for the
 * MI_COM transaction to complete, then restores auto-polling.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* A manual MI_COM access would race with auto-polling. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Assemble the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears or the loop budget expires. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);	/* re-read for data */
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MDIO (MI) interface.
 * Writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently skipped on
 * the 5906.  Suspends/restores MI auto-polling around the access,
 * like tg3_readphy().  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* 5906: these two registers are not written. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* A manual MI_COM access would race with auto-polling. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Assemble the MI frame: address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears or the loop budget expires. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Enable or disable automatic MDI crossover in the PHY.  Only applies
 * to 5705+ copper devices (serdes variants return immediately).  The
 * 5906 uses the EPHY test/shadow registers; other chips use the
 * AUX_CTRL misc shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Expose the shadow bank, flip the MDIX bit, then
		 * restore the original test-register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the misc shadow, read-modify-write the
		 * force-AMDIX bit, and set WREN so the write sticks.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
845 static void tg3_phy_set_wirespeed(struct tg3 *tp)
846 {
847         u32 val;
848
849         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
850                 return;
851
852         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
853             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
854                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
855                              (val | (1 << 15) | (1 << 4)));
856 }
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Program a known test pattern into each of the four PHY DSP
 * channels and read it back through the DSP macro, verifying the
 * readback.  Part of the 5703/4/5 PHY reset workaround.
 *
 * Returns 0 when every channel verifies.  On a DSP macro timeout
 * *resetp is set so the caller performs another BMCR reset before
 * retrying; on a data miscompare the DSP is given a recovery
 * sequence (0x000b / 0x4001 / 0x4005) and -EBUSY is returned
 * without requesting another reset.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block in write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the pattern and wait for the macro. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back as low/high pairs; only 15 bits of
		 * the low word and 4 bits of the high word are valid.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
973 static int tg3_phy_reset_chanpat(struct tg3 *tp)
974 {
975         int chan;
976
977         for (chan = 0; chan < 4; chan++) {
978                 int i;
979
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
981                              (chan * 0x2000) | 0x0200);
982                 tg3_writephy(tp, 0x16, 0x0002);
983                 for (i = 0; i < 6; i++)
984                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
985                 tg3_writephy(tp, 0x16, 0x0202);
986                 if (tg3_wait_macro_done(tp))
987                         return -EBUSY;
988         }
989
990         return 0;
991 }
992
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, then write and verify a DSP test pattern,
 * retrying up to 10 times (with a fresh BMCR reset whenever the
 * DSP macro times out).  Finally clears the pattern, restores
 * MII_TG3_CTRL, and re-enables the transmitter/interrupt bits in
 * MII_TG3_EXT_CTRL.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		/* NOTE(review): if this read fails on every retry,
		 * phy9_orig is used uninitialized in the restore write
		 * further below — confirm and consider initializing it.
		 */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the per-channel test pattern before leaving. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the saved master/slave control value. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* Reset the tigon3 PHY and apply chip-specific post-reset
 * workarounds (ADC, 5704-A0, BER, and jitter errata; jumbo-frame
 * bits), then re-enable auto-MDIX and wirespeed.  Drops carrier
 * and logs a link report if the interface was up.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Take the 5906 EPHY out of IDDQ before touching it. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP-verified reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
		u32 val;

		/* Undo the 12.5 MHz MAC clock set at power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	/* PHY errata workarounds: vendor-specified DSP/AUX_CTRL write
	 * sequences keyed off per-chip bug flags set at probe time.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1196
/* Drive the GRC GPIO pins that control the board's auxiliary power
 * switching.  On dual-port 5704/5714 boards the GPIOs are shared,
 * so the peer port's WOL/ASF state is consulted and only one port
 * performs the sequence.  No-op on non-NIC (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* WOL or ASF is active on one of the ports: drive the
		 * GPIOs so auxiliary power stays available.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Defer to the peer if it already owns the
			 * GPIOs (its init is complete).
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			/* Raise GPIO0 with the other outputs held. */
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs auxiliary power: pulse GPIO1 to
		 * release it (not applicable on 5700/5701).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1292
1293 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1294 {
1295         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1296                 return 1;
1297         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1298                 if (speed != SPEED_10)
1299                         return 1;
1300         } else if (speed == SPEED_10)
1301                 return 1;
1302
1303         return 0;
1304 }
1305
1306 static int tg3_setup_phy(struct tg3 *, int);
1307
1308 #define RESET_KIND_SHUTDOWN     0
1309 #define RESET_KIND_INIT         1
1310 #define RESET_KIND_SUSPEND      2
1311
1312 static void tg3_write_sig_post_reset(struct tg3 *, int);
1313 static int tg3_halt_cpu(struct tg3 *, u32);
1314 static int tg3_nvram_lock(struct tg3 *);
1315 static void tg3_nvram_unlock(struct tg3 *);
1316
1317 static void tg3_power_down_phy(struct tg3 *tp)
1318 {
1319         u32 val;
1320
1321         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1323                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1324                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1325
1326                         sg_dig_ctrl |=
1327                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1328                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
1329                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1330                 }
1331                 return;
1332         }
1333
1334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1335                 tg3_bmcr_reset(tp);
1336                 val = tr32(GRC_MISC_CFG);
1337                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1338                 udelay(40);
1339                 return;
1340         } else {
1341                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1342                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1343                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1344         }
1345
1346         /* The PHY should not be powered down on some chips because
1347          * of bugs.
1348          */
1349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1350             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1351             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1352              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1353                 return;
1354
1355         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
1356             tp->pci_chip_rev_id == CHIPREV_ID_5761_A0) {
1357                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1358                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1359                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1360                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1361         }
1362
1363         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1364 }
1365
1366 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1367 {
1368         u32 misc_host_ctrl;
1369         u16 power_control, power_caps;
1370         int pm = tp->pm_cap;
1371
1372         /* Make sure register accesses (indirect or otherwise)
1373          * will function correctly.
1374          */
1375         pci_write_config_dword(tp->pdev,
1376                                TG3PCI_MISC_HOST_CTRL,
1377                                tp->misc_host_ctrl);
1378
1379         pci_read_config_word(tp->pdev,
1380                              pm + PCI_PM_CTRL,
1381                              &power_control);
1382         power_control |= PCI_PM_CTRL_PME_STATUS;
1383         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1384         switch (state) {
1385         case PCI_D0:
1386                 power_control |= 0;
1387                 pci_write_config_word(tp->pdev,
1388                                       pm + PCI_PM_CTRL,
1389                                       power_control);
1390                 udelay(100);    /* Delay after power state change */
1391
1392                 /* Switch out of Vaux if it is a NIC */
1393                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1394                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1395
1396                 return 0;
1397
1398         case PCI_D1:
1399                 power_control |= 1;
1400                 break;
1401
1402         case PCI_D2:
1403                 power_control |= 2;
1404                 break;
1405
1406         case PCI_D3hot:
1407                 power_control |= 3;
1408                 break;
1409
1410         default:
1411                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1412                        "requested.\n",
1413                        tp->dev->name, state);
1414                 return -EINVAL;
1415         };
1416
1417         power_control |= PCI_PM_CTRL_PME_ENABLE;
1418
1419         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1420         tw32(TG3PCI_MISC_HOST_CTRL,
1421              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1422
1423         if (tp->link_config.phy_is_low_power == 0) {
1424                 tp->link_config.phy_is_low_power = 1;
1425                 tp->link_config.orig_speed = tp->link_config.speed;
1426                 tp->link_config.orig_duplex = tp->link_config.duplex;
1427                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1428         }
1429
1430         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1431                 tp->link_config.speed = SPEED_10;
1432                 tp->link_config.duplex = DUPLEX_HALF;
1433                 tp->link_config.autoneg = AUTONEG_ENABLE;
1434                 tg3_setup_phy(tp, 0);
1435         }
1436
1437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1438                 u32 val;
1439
1440                 val = tr32(GRC_VCPU_EXT_CTRL);
1441                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1442         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1443                 int i;
1444                 u32 val;
1445
1446                 for (i = 0; i < 200; i++) {
1447                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1448                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1449                                 break;
1450                         msleep(1);
1451                 }
1452         }
1453         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1454                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1455                                                      WOL_DRV_STATE_SHUTDOWN |
1456                                                      WOL_DRV_WOL |
1457                                                      WOL_SET_MAGIC_PKT);
1458
1459         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1460
1461         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1462                 u32 mac_mode;
1463
1464                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1465                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1466                         udelay(40);
1467
1468                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1469                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1470                         else
1471                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1472
1473                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1474                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1475                             ASIC_REV_5700) {
1476                                 u32 speed = (tp->tg3_flags &
1477                                              TG3_FLAG_WOL_SPEED_100MB) ?
1478                                              SPEED_100 : SPEED_10;
1479                                 if (tg3_5700_link_polarity(tp, speed))
1480                                         mac_mode |= MAC_MODE_LINK_POLARITY;
1481                                 else
1482                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
1483                         }
1484                 } else {
1485                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1486                 }
1487
1488                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1489                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1490
1491                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1492                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1493                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1494
1495                 tw32_f(MAC_MODE, mac_mode);
1496                 udelay(100);
1497
1498                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1499                 udelay(10);
1500         }
1501
1502         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1503             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1504              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1505                 u32 base_val;
1506
1507                 base_val = tp->pci_clock_ctrl;
1508                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1509                              CLOCK_CTRL_TXCLK_DISABLE);
1510
1511                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1512                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1513         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1514                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1515                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1516                 /* do nothing */
1517         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1518                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1519                 u32 newbits1, newbits2;
1520
1521                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1522                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1523                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1524                                     CLOCK_CTRL_TXCLK_DISABLE |
1525                                     CLOCK_CTRL_ALTCLK);
1526                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1527                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1528                         newbits1 = CLOCK_CTRL_625_CORE;
1529                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1530                 } else {
1531                         newbits1 = CLOCK_CTRL_ALTCLK;
1532                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1533                 }
1534
1535                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1536                             40);
1537
1538                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1539                             40);
1540
1541                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1542                         u32 newbits3;
1543
1544                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1545                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1546                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1547                                             CLOCK_CTRL_TXCLK_DISABLE |
1548                                             CLOCK_CTRL_44MHZ_CORE);
1549                         } else {
1550                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1551                         }
1552
1553                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1554                                     tp->pci_clock_ctrl | newbits3, 40);
1555                 }
1556         }
1557
1558         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1559             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1560             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1561                 tg3_power_down_phy(tp);
1562
1563         tg3_frob_aux_power(tp);
1564
1565         /* Workaround for unstable PLL clock */
1566         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1567             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1568                 u32 val = tr32(0x7d00);
1569
1570                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1571                 tw32(0x7d00, val);
1572                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1573                         int err;
1574
1575                         err = tg3_nvram_lock(tp);
1576                         tg3_halt_cpu(tp, RX_CPU_BASE);
1577                         if (!err)
1578                                 tg3_nvram_unlock(tp);
1579                 }
1580         }
1581
1582         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1583
1584         /* Finally, set the new power state. */
1585         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1586         udelay(100);    /* Delay after power state change */
1587
1588         return 0;
1589 }
1590
1591 static void tg3_link_report(struct tg3 *tp)
1592 {
1593         if (!netif_carrier_ok(tp->dev)) {
1594                 if (netif_msg_link(tp))
1595                         printk(KERN_INFO PFX "%s: Link is down.\n",
1596                                tp->dev->name);
1597         } else if (netif_msg_link(tp)) {
1598                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1599                        tp->dev->name,
1600                        (tp->link_config.active_speed == SPEED_1000 ?
1601                         1000 :
1602                         (tp->link_config.active_speed == SPEED_100 ?
1603                          100 : 10)),
1604                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1605                         "full" : "half"));
1606
1607                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1608                        "%s for RX.\n",
1609                        tp->dev->name,
1610                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1611                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1612         }
1613 }
1614
1615 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1616 {
1617         u32 new_tg3_flags = 0;
1618         u32 old_rx_mode = tp->rx_mode;
1619         u32 old_tx_mode = tp->tx_mode;
1620
1621         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1622
1623                 /* Convert 1000BaseX flow control bits to 1000BaseT
1624                  * bits before resolving flow control.
1625                  */
1626                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1627                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1628                                        ADVERTISE_PAUSE_ASYM);
1629                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1630
1631                         if (local_adv & ADVERTISE_1000XPAUSE)
1632                                 local_adv |= ADVERTISE_PAUSE_CAP;
1633                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1634                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1635                         if (remote_adv & LPA_1000XPAUSE)
1636                                 remote_adv |= LPA_PAUSE_CAP;
1637                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1638                                 remote_adv |= LPA_PAUSE_ASYM;
1639                 }
1640
1641                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1642                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1643                                 if (remote_adv & LPA_PAUSE_CAP)
1644                                         new_tg3_flags |=
1645                                                 (TG3_FLAG_RX_PAUSE |
1646                                                 TG3_FLAG_TX_PAUSE);
1647                                 else if (remote_adv & LPA_PAUSE_ASYM)
1648                                         new_tg3_flags |=
1649                                                 (TG3_FLAG_RX_PAUSE);
1650                         } else {
1651                                 if (remote_adv & LPA_PAUSE_CAP)
1652                                         new_tg3_flags |=
1653                                                 (TG3_FLAG_RX_PAUSE |
1654                                                 TG3_FLAG_TX_PAUSE);
1655                         }
1656                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1657                         if ((remote_adv & LPA_PAUSE_CAP) &&
1658                         (remote_adv & LPA_PAUSE_ASYM))
1659                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1660                 }
1661
1662                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1663                 tp->tg3_flags |= new_tg3_flags;
1664         } else {
1665                 new_tg3_flags = tp->tg3_flags;
1666         }
1667
1668         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1669                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1670         else
1671                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1672
1673         if (old_rx_mode != tp->rx_mode) {
1674                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1675         }
1676
1677         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1678                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1679         else
1680                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1681
1682         if (old_tx_mode != tp->tx_mode) {
1683                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1684         }
1685 }
1686
1687 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1688 {
1689         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1690         case MII_TG3_AUX_STAT_10HALF:
1691                 *speed = SPEED_10;
1692                 *duplex = DUPLEX_HALF;
1693                 break;
1694
1695         case MII_TG3_AUX_STAT_10FULL:
1696                 *speed = SPEED_10;
1697                 *duplex = DUPLEX_FULL;
1698                 break;
1699
1700         case MII_TG3_AUX_STAT_100HALF:
1701                 *speed = SPEED_100;
1702                 *duplex = DUPLEX_HALF;
1703                 break;
1704
1705         case MII_TG3_AUX_STAT_100FULL:
1706                 *speed = SPEED_100;
1707                 *duplex = DUPLEX_FULL;
1708                 break;
1709
1710         case MII_TG3_AUX_STAT_1000HALF:
1711                 *speed = SPEED_1000;
1712                 *duplex = DUPLEX_HALF;
1713                 break;
1714
1715         case MII_TG3_AUX_STAT_1000FULL:
1716                 *speed = SPEED_1000;
1717                 *duplex = DUPLEX_FULL;
1718                 break;
1719
1720         default:
1721                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1722                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1723                                  SPEED_10;
1724                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1725                                   DUPLEX_HALF;
1726                         break;
1727                 }
1728                 *speed = SPEED_INVALID;
1729                 *duplex = DUPLEX_INVALID;
1730                 break;
1731         };
1732 }
1733
/* Program the copper PHY's advertisement/control registers according to
 * tp->link_config, then either force the requested speed/duplex (when
 * autoneg is disabled) or (re)start autonegotiation.
 *
 * Three configuration paths:
 *   1. phy_is_low_power      -> advertise only 10Mb (plus 100Mb if WoL
 *                               needs it), gigabit disabled;
 *   2. speed == SPEED_INVALID-> advertise everything enabled in
 *                               link_config.advertising;
 *   3. otherwise             -> advertise exactly the requested mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised when wake-on-LAN must work at
		 * 100baseT speeds.
		 */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise whatever
		 * link_config.advertising allows.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* On 5701 A0/B0 force master on gigabit links --
			 * presumably a chip-revision workaround; confirm
			 * against the Broadcom errata.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master preference as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			/* 10/100 forced mode: clear gigabit control first. */
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms)
			 * for link to drop before applying the new BMCR.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches link-down; read it twice so
				 * the second read reflects current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (or restart) negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1866
1867 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1868 {
1869         int err;
1870
1871         /* Turn off tap power management. */
1872         /* Set Extended packet length bit */
1873         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1874
1875         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1876         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1877
1878         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1879         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1880
1881         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1882         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1883
1884         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1885         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1886
1887         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1888         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1889
1890         udelay(40);
1891
1892         return err;
1893 }
1894
1895 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1896 {
1897         u32 adv_reg, all_mask = 0;
1898
1899         if (mask & ADVERTISED_10baseT_Half)
1900                 all_mask |= ADVERTISE_10HALF;
1901         if (mask & ADVERTISED_10baseT_Full)
1902                 all_mask |= ADVERTISE_10FULL;
1903         if (mask & ADVERTISED_100baseT_Half)
1904                 all_mask |= ADVERTISE_100HALF;
1905         if (mask & ADVERTISED_100baseT_Full)
1906                 all_mask |= ADVERTISE_100FULL;
1907
1908         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1909                 return 0;
1910
1911         if ((adv_reg & all_mask) != all_mask)
1912                 return 0;
1913         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1914                 u32 tg3_ctrl;
1915
1916                 all_mask = 0;
1917                 if (mask & ADVERTISED_1000baseT_Half)
1918                         all_mask |= ADVERTISE_1000HALF;
1919                 if (mask & ADVERTISED_1000baseT_Full)
1920                         all_mask |= ADVERTISE_1000FULL;
1921
1922                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1923                         return 0;
1924
1925                 if ((tg3_ctrl & all_mask) != all_mask)
1926                         return 0;
1927         }
1928         return 1;
1929 }
1930
/* Bring up (or re-evaluate) the link on a copper PHY.  Clears pending
 * MAC/PHY status, applies per-chip PHY workarounds, polls the PHY for
 * link, resolves speed/duplex/flow-control, programs MAC_MODE to match,
 * and updates the netdev carrier state (reporting any change).
 *
 * @force_reset: non-zero to unconditionally reset the PHY first.
 * Returns 0 on success or a negative error from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC events and clear latched link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice so the second value
		 * reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down on a 5401: (re)load its DSP patch
			 * and poll up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: if link still has not
			 * come back, reset the PHY and redo the DSP init.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change interrupt when MI interrupts are in
	 * use; otherwise mask everything (except on 5906, left untouched).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* 5700/5701: select the PHY LED mode per led_ctrl. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* If aux-ctrl bit 10 is not yet set, set it and skip
		 * straight to the reconfiguration path below.
		 * NOTE(review): bit 10's exact meaning is not visible
		 * here -- confirm against the PHY datasheet.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 iterations) for link-up in BMSR. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero auxiliary status word, then decode
		 * it into the negotiated speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read a stable BMCR value (retry on 0 or all-ones-ish
		 * 0x7fff, which indicate a bad read).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link only counts as up when the PHY
			 * matches the requested speed and duplex exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* Resolve flow control for autonegotiated full-duplex links. */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram the PHY, then see if link came straight back. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware via
	 * its mailbox after clearing the changed-status bits.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2207
/* State for the software autonegotiation state machine used on fiber
 * links, advanced by tg3_fiber_aneg_smachine().  NOTE(review): the
 * state/flag names mirror 1000baseX (clause 37 style) autoneg -- confirm
 * against the state machine implementation below.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* control and result bits (below) */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters maintained by the state machine: cur_time is
	 * incremented on every invocation; link_time records when the
	 * current state was entered.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has repeated; used to detect a stable partner advertisement.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match flags derived from the receive path each tick. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* raw tx/rx config words (ANEG_CFG_*) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle delay used by the state machine, in cur_time ticks. */
#define ANEG_STATE_SETTLE_TIME  10000
2271
/* Software implementation of the IEEE 802.3 Clause 37 (1000BASE-X)
 * auto-negotiation state machine, used for fiber devices whose MAC
 * cannot autonegotiate in hardware.  Called repeatedly (roughly once
 * per microsecond) by fiber_autoneg() until it reports ANEG_DONE or
 * ANEG_FAILED.
 *
 * @tp: device whose MAC_* registers are read/written
 * @ap: persistent state-machine context, updated in place
 *
 * Return: ANEG_OK (keep polling), ANEG_TIMER_ENAB (keep polling, a
 * settle timer is running), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all match/timing state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and derive the ability_match,
	 * ack_match and idle_match predicates the states below consume.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* New word: restart the stability counter. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				/* Same word seen twice in a row. */
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* Link partner is sending IDLE, not config words. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		/* Transmit all-zero config words while restarting. */
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait out the settle time before ability detection. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		/* Advertise full-duplex plus symmetric pause. */
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Ability word changed; renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertised abilities into the
		 * MR_LP_ADV_* flags for the caller to inspect.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented; give up.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		/* Stop sending config words and watch for idle. */
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2519
2520 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2521 {
2522         int res = 0;
2523         struct tg3_fiber_aneginfo aninfo;
2524         int status = ANEG_FAILED;
2525         unsigned int tick;
2526         u32 tmp;
2527
2528         tw32_f(MAC_TX_AUTO_NEG, 0);
2529
2530         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2531         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2532         udelay(40);
2533
2534         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2535         udelay(40);
2536
2537         memset(&aninfo, 0, sizeof(aninfo));
2538         aninfo.flags |= MR_AN_ENABLE;
2539         aninfo.state = ANEG_STATE_UNKNOWN;
2540         aninfo.cur_time = 0;
2541         tick = 0;
2542         while (++tick < 195000) {
2543                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2544                 if (status == ANEG_DONE || status == ANEG_FAILED)
2545                         break;
2546
2547                 udelay(1);
2548         }
2549
2550         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2551         tw32_f(MAC_MODE, tp->mac_mode);
2552         udelay(40);
2553
2554         *flags = aninfo.flags;
2555
2556         if (status == ANEG_DONE &&
2557             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2558                              MR_LP_ADV_FULL_DUPLEX)))
2559                 res = 1;
2560
2561         return res;
2562 }
2563
/* One-time bring-up sequence for the BCM8002 SerDes PHY.
 *
 * Performs a software reset and then programs a series of vendor
 * registers (PLL lock range, auto-lock/comdet, POR toggling) in the
 * exact order supplied by Broadcom; do not reorder these writes.
 * The sequence is skipped when the device has already completed init
 * and PCS sync has been lost (reset only when initting or linked).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2613
/* Hardware-assisted autoneg path using the SG DIG block (5704S-class
 * SerDes).  Programs SG_DIG_CTRL for the configured mode, applies the
 * MAC_SERDES_CFG workaround on chips other than 5704 A0/A1, and falls
 * back to parallel detection when the link partner never sends config
 * code-words.
 *
 * @tp: device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 if link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		/* DUAL_MAC_CTRL_ID distinguishes port A from port B. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg (bit 31) is on,
		 * turn it off and restore the default SG_DIG config.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While in parallel-detect mode with PCS sync and no
		 * config words coming in, keep the current link and
		 * count down before forcing a renegotiation.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the autoneg-restart bit (30). */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 == autoneg complete. */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete; burn down the
			 * timeout, then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2750
2751 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2752 {
2753         int current_link_up = 0;
2754
2755         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2756                 goto out;
2757
2758         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2759                 u32 flags;
2760                 int i;
2761
2762                 if (fiber_autoneg(tp, &flags)) {
2763                         u32 local_adv, remote_adv;
2764
2765                         local_adv = ADVERTISE_PAUSE_CAP;
2766                         remote_adv = 0;
2767                         if (flags & MR_LP_ADV_SYM_PAUSE)
2768                                 remote_adv |= LPA_PAUSE_CAP;
2769                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2770                                 remote_adv |= LPA_PAUSE_ASYM;
2771
2772                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2773
2774                         current_link_up = 1;
2775                 }
2776                 for (i = 0; i < 30; i++) {
2777                         udelay(20);
2778                         tw32_f(MAC_STATUS,
2779                                (MAC_STATUS_SYNC_CHANGED |
2780                                 MAC_STATUS_CFG_CHANGED));
2781                         udelay(40);
2782                         if ((tr32(MAC_STATUS) &
2783                              (MAC_STATUS_SYNC_CHANGED |
2784                               MAC_STATUS_CFG_CHANGED)) == 0)
2785                                 break;
2786                 }
2787
2788                 mac_status = tr32(MAC_STATUS);
2789                 if (current_link_up == 0 &&
2790                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2791                     !(mac_status & MAC_STATUS_RCVD_CFG))
2792                         current_link_up = 1;
2793         } else {
2794                 /* Forcing 1000FD link up. */
2795                 current_link_up = 1;
2796
2797                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2798                 udelay(40);
2799
2800                 tw32_f(MAC_MODE, tp->mac_mode);
2801                 udelay(40);
2802         }
2803
2804 out:
2805         return current_link_up;
2806 }
2807
/* Top-level link setup for TBI (fiber) devices.  Chooses between the
 * SG DIG hardware autoneg path and the software state machine, then
 * updates link_config.active_*, the LED override bits and the netif
 * carrier state, reporting link changes to the log.
 *
 * @tp: device state
 * @force_reset: unused on this path; kept for signature parity with
 *  the other tg3_setup_*_phy() routines
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pre-call state so we can report only real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device with carrier already up
	 * and a clean status (synced, signal, no pending changes or
	 * incoming config words) needs no renegotiation.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to prod the partner. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier changes; otherwise report only if speed,
	 * duplex or pause configuration actually changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2919
2920 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2921 {
2922         int current_link_up, err = 0;
2923         u32 bmsr, bmcr;
2924         u16 current_speed;
2925         u8 current_duplex;
2926
2927         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2928         tw32_f(MAC_MODE, tp->mac_mode);
2929         udelay(40);
2930
2931         tw32(MAC_EVENT, 0);
2932
2933         tw32_f(MAC_STATUS,
2934              (MAC_STATUS_SYNC_CHANGED |
2935               MAC_STATUS_CFG_CHANGED |
2936               MAC_STATUS_MI_COMPLETION |
2937               MAC_STATUS_LNKSTATE_CHANGED));
2938         udelay(40);
2939
2940         if (force_reset)
2941                 tg3_phy_reset(tp);
2942
2943         current_link_up = 0;
2944         current_speed = SPEED_INVALID;
2945         current_duplex = DUPLEX_INVALID;
2946
2947         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2948         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2949         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2950                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2951                         bmsr |= BMSR_LSTATUS;
2952                 else
2953                         bmsr &= ~BMSR_LSTATUS;
2954         }
2955
2956         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2957
2958         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2959             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2960                 /* do nothing, just check for link up at the end */
2961         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2962                 u32 adv, new_adv;
2963
2964                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2965                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2966                                   ADVERTISE_1000XPAUSE |
2967                                   ADVERTISE_1000XPSE_ASYM |
2968                                   ADVERTISE_SLCT);
2969
2970                 /* Always advertise symmetric PAUSE just like copper */
2971                 new_adv |= ADVERTISE_1000XPAUSE;
2972
2973                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2974                         new_adv |= ADVERTISE_1000XHALF;
2975                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2976                         new_adv |= ADVERTISE_1000XFULL;
2977
2978                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2979                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2980                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2981                         tg3_writephy(tp, MII_BMCR, bmcr);
2982
2983                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2984                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2985                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2986
2987                         return err;
2988                 }
2989         } else {
2990                 u32 new_bmcr;
2991
2992                 bmcr &= ~BMCR_SPEED1000;
2993                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2994
2995                 if (tp->link_config.duplex == DUPLEX_FULL)
2996                         new_bmcr |= BMCR_FULLDPLX;
2997
2998                 if (new_bmcr != bmcr) {
2999                         /* BMCR_SPEED1000 is a reserved bit that needs
3000                          * to be set on write.
3001                          */
3002                         new_bmcr |= BMCR_SPEED1000;
3003
3004                         /* Force a linkdown */
3005                         if (netif_carrier_ok(tp->dev)) {
3006                                 u32 adv;
3007
3008                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3009                                 adv &= ~(ADVERTISE_1000XFULL |
3010                                          ADVERTISE_1000XHALF |
3011                                          ADVERTISE_SLCT);
3012                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3013                                 tg3_writephy(tp, MII_BMCR, bmcr |
3014                                                            BMCR_ANRESTART |
3015                                                            BMCR_ANENABLE);
3016                                 udelay(10);
3017                                 netif_carrier_off(tp->dev);
3018                         }
3019                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3020                         bmcr = new_bmcr;
3021                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3022                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3023                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3024                             ASIC_REV_5714) {
3025                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3026                                         bmsr |= BMSR_LSTATUS;
3027                                 else
3028                                         bmsr &= ~BMSR_LSTATUS;
3029                         }
3030                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3031                 }
3032         }
3033
3034         if (bmsr & BMSR_LSTATUS) {
3035                 current_speed = SPEED_1000;
3036                 current_link_up = 1;
3037                 if (bmcr & BMCR_FULLDPLX)
3038                         current_duplex = DUPLEX_FULL;
3039                 else
3040                         current_duplex = DUPLEX_HALF;
3041
3042                 if (bmcr & BMCR_ANENABLE) {
3043                         u32 local_adv, remote_adv, common;
3044
3045                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3046                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3047                         common = local_adv & remote_adv;
3048                         if (common & (ADVERTISE_1000XHALF |
3049                                       ADVERTISE_1000XFULL)) {
3050                                 if (common & ADVERTISE_1000XFULL)
3051                                         current_duplex = DUPLEX_FULL;
3052                                 else
3053                                         current_duplex = DUPLEX_HALF;
3054
3055                                 tg3_setup_flow_control(tp, local_adv,
3056                                                        remote_adv);
3057                         }
3058                         else
3059                                 current_link_up = 0;
3060                 }
3061         }
3062
3063         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3064         if (tp->link_config.active_duplex == DUPLEX_HALF)
3065                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3066
3067         tw32_f(MAC_MODE, tp->mac_mode);
3068         udelay(40);
3069
3070         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3071
3072         tp->link_config.active_speed = current_speed;
3073         tp->link_config.active_duplex = current_duplex;
3074
3075         if (current_link_up != netif_carrier_ok(tp->dev)) {
3076                 if (current_link_up)
3077                         netif_carrier_on(tp->dev);
3078                 else {
3079                         netif_carrier_off(tp->dev);
3080                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3081                 }
3082                 tg3_link_report(tp);
3083         }
3084         return err;
3085 }
3086
/* Detect link on SerDes NICs whose link partner is not performing
 * 1000BASE-X autonegotiation ("parallel detection").  Called
 * periodically; tp->serdes_counter delays the check to give a
 * normal autoneg a chance to complete first.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): read twice on purpose -- presumably the
			 * first read clears latched status; confirm against the
			 * PHY expansion-register documentation.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000/full and remember we linked up
				 * without autoneg.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3144
/* (Re)configure the PHY and MAC for the current link settings.
 *
 * Dispatches to the fiber, fiber-MII (SerDes), or copper PHY setup
 * routine, then applies chip-specific post-setup fixups: the 5784 A0
 * GRC timer prescaler, the MAC_TX_LENGTHS slot time for 1000/half,
 * statistics coalescing gating on carrier (pre-5705 chips), and the
 * ASPM L1 threshold workaround.
 *
 * Returns the status of the PHY setup routine (0 on success).
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) {
		u32 val, scale;

		/* Rescale the GRC timer prescaler to match the MAC
		 * clock speed currently reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half gets a 0xff slot time; all other modes use 32. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Coalesce statistics only while carrier is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* NOTE(review): threshold set to tp->pwrmgmt_thresh when link
		 * is down and forced to the mask maximum when up -- presumably
		 * a power/latency tradeoff; confirm against the ASPM
		 * workaround description.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3206
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery is only meaningful if the reorder workaround is not
	 * already in effect (flag set or indirect mailbox writes in use).
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* The reset task (scheduled from tg3_poll's tx_recovery path)
	 * performs the actual chip reset when it sees this flag.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3227
/* Number of free TX descriptors.  The smp_mb() orders this read of
 * tx_prod/tx_cons against prior updates made on another CPU (see the
 * matching barrier and comment in tg3_tx()).
 */
static inline u32 tg3_tx_avail(struct tg3 *tp)
{
	smp_mb();
	return (tp->tx_pending -
		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
}
3234
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reap every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for a slot with no skb means the hardware
		 * and driver disagree about the ring state -- treat it as
		 * the MMIO-reordering problem and bail out.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear head of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Then unmap each fragment descriptor that followed it.
		 * A non-NULL skb or running past hw_idx here is another
		 * ring-inconsistency symptom; note it and recover below.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		/* Re-check under the tx lock to avoid racing a concurrent
		 * queue stop in the transmit path.
		 */
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3302
3303 /* Returns size of skb allocated or < 0 on error.
3304  *
3305  * We only need to fill in the address because the other members
3306  * of the RX descriptor are invariant, see tg3_init_rings.
3307  *
3308  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3309  * posting buffers we only dirty the first cache line of the RX
3310  * descriptor (containing the address).  Whereas for the RX status
3311  * buffers the cpu only reads the last cacheline of the RX descriptor
3312  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3313  */
3314 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3315                             int src_idx, u32 dest_idx_unmasked)
3316 {
3317         struct tg3_rx_buffer_desc *desc;
3318         struct ring_info *map, *src_map;
3319         struct sk_buff *skb;
3320         dma_addr_t mapping;
3321         int skb_size, dest_idx;
3322
3323         src_map = NULL;
3324         switch (opaque_key) {
3325         case RXD_OPAQUE_RING_STD:
3326                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3327                 desc = &tp->rx_std[dest_idx];
3328                 map = &tp->rx_std_buffers[dest_idx];
3329                 if (src_idx >= 0)
3330                         src_map = &tp->rx_std_buffers[src_idx];
3331                 skb_size = tp->rx_pkt_buf_sz;
3332                 break;
3333
3334         case RXD_OPAQUE_RING_JUMBO:
3335                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3336                 desc = &tp->rx_jumbo[dest_idx];
3337                 map = &tp->rx_jumbo_buffers[dest_idx];
3338                 if (src_idx >= 0)
3339                         src_map = &tp->rx_jumbo_buffers[src_idx];
3340                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3341                 break;
3342
3343         default:
3344                 return -EINVAL;
3345         };
3346
3347         /* Do not overwrite any of the map or rp information
3348          * until we are sure we can commit to a new buffer.
3349          *
3350          * Callers depend upon this behavior and assume that
3351          * we leave everything unchanged if we fail.
3352          */
3353         skb = netdev_alloc_skb(tp->dev, skb_size);
3354         if (skb == NULL)
3355                 return -ENOMEM;
3356
3357         skb_reserve(skb, tp->rx_offset);
3358
3359         mapping = pci_map_single(tp->pdev, skb->data,
3360                                  skb_size - tp->rx_offset,
3361                                  PCI_DMA_FROMDEVICE);
3362
3363         map->skb = skb;
3364         pci_unmap_addr_set(map, mapping, mapping);
3365
3366         if (src_map != NULL)
3367                 src_map->skb = NULL;
3368
3369         desc->addr_hi = ((u64)mapping >> 32);
3370         desc->addr_lo = ((u64)mapping & 0xffffffff);
3371
3372         return skb_size;
3373 }
3374
3375 /* We only need to move over in the address because the other
3376  * members of the RX descriptor are invariant.  See notes above
3377  * tg3_alloc_rx_skb for full details.
3378  */
3379 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3380                            int src_idx, u32 dest_idx_unmasked)
3381 {
3382         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3383         struct ring_info *src_map, *dest_map;
3384         int dest_idx;
3385
3386         switch (opaque_key) {
3387         case RXD_OPAQUE_RING_STD:
3388                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3389                 dest_desc = &tp->rx_std[dest_idx];
3390                 dest_map = &tp->rx_std_buffers[dest_idx];
3391                 src_desc = &tp->rx_std[src_idx];
3392                 src_map = &tp->rx_std_buffers[src_idx];
3393                 break;
3394
3395         case RXD_OPAQUE_RING_JUMBO:
3396                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3397                 dest_desc = &tp->rx_jumbo[dest_idx];
3398                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3399                 src_desc = &tp->rx_jumbo[src_idx];
3400                 src_map = &tp->rx_jumbo_buffers[src_idx];
3401                 break;
3402
3403         default:
3404                 return;
3405         };
3406
3407         dest_map->skb = src_map->skb;
3408         pci_unmap_addr_set(dest_map, mapping,
3409                            pci_unmap_addr(src_map, mapping));
3410         dest_desc->addr_hi = src_desc->addr_hi;
3411         dest_desc->addr_lo = src_desc->addr_lo;
3412
3413         src_map->skb = NULL;
3414 }
3415
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the hw-accel VLAN path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3422
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (and
		 * which index in it) this buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames, except for the odd-nibble MII
		 * error which is not treated as fatal here.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: hand the existing buffer up and
			 * post a fresh replacement to the ring.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet (or 5701 PCI-X): copy into a fresh
			 * skb and recycle the original ring buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when enabled and the
		 * chip reports a complete TCP/UDP checksum of 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the standard-ring producer index so
		 * the chip never runs dry while we process a large batch.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Ensure the mailbox writes are posted before returning. */
	mmiowb();

	return received;
}
3602
/* One pass of NAPI work: handle link-change events, reap TX
 * completions, then receive packets within the remaining budget.
 * Returns the updated work_done count; bails out early if the TX
 * path flagged a recovery (reset pending).
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before re-running
			 * PHY setup under the lock.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3636
/* NAPI poll entry point.  Loops over tg3_poll_work() until either the
 * budget is exhausted or no work remains, in which case it completes
 * NAPI and re-enables chip interrupts.  A pending TX recovery aborts
 * polling and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3677
/* Mark the IRQ handlers as quiesced and wait for any handler already
 * running on another CPU to finish.  The smp_mb() makes the irq_sync
 * store visible before synchronize_irq() waits.  Called from
 * tg3_full_lock() with tp->lock held.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3687
/* Nonzero while IRQ processing is quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3692
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3704
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3709
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RCB entry for the poll path. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3726
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the status block and next RCB entry for the poll path. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3751
/* Legacy INTx interrupt handler (untagged status block variant). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or chip is
			 * mid-reset -- report unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3800
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * a repeated status_tag means no new status has been posted.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or chip is
			 * mid-reset -- report unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
3848
3849 /* ISR for interrupt test */
3850 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3851 {
3852         struct net_device *dev = dev_id;
3853         struct tg3 *tp = netdev_priv(dev);
3854         struct tg3_hw_status *sblk = tp->hw_status;
3855
3856         if ((sblk->status & SD_STATUS_UPDATED) ||
3857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3858                 tg3_disable_ints(tp);
3859                 return IRQ_RETVAL(1);
3860         }
3861         return IRQ_RETVAL(0);
3862 }
3863
3864 static int tg3_init_hw(struct tg3 *, int);
3865 static int tg3_halt(struct tg3 *, int, int);
3866
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * note the full lock is dropped around dev_close() and re-acquired
 * before returning, since the caller still expects to hold it.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip back down, then take the
		 * interface down entirely.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		/* dev_close() expects NAPI enabled; re-enable it first. */
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3888
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netpoll (netconsole, kgdboe): invoke the
 * regular INTx handler by hand for this device's IRQ.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3897
/* Process-context reset worker (scheduled from tg3_tx_timeout() and
 * other error paths): stop the interface, halt and re-initialize the
 * chip, then restart networking.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	/* Take the lock only to check running state; bail out if the
	 * device was closed before this work ran.
	 */
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop the interface outside of the full lock. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request atomically under
	 * the lock so a concurrent requester is not lost.
	 */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery pending: fall back to the flush-on-write
		 * mailbox methods and flag mailbox write reordering.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3938
3939 static void tg3_dump_short_state(struct tg3 *tp)
3940 {
3941         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3942                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3943         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3944                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3945 }
3946
/* net_device tx_timeout hook: a transmit has stalled.  Log the event
 * (if TX-error messages are enabled) and schedule the reset worker to
 * recover the device; the actual reset runs in process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3959
3960 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3961 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3962 {
3963         u32 base = (u32) mapping & 0xffffffff;
3964
3965         return ((base > 0xffffdcc0) &&
3966                 (base + len + 8 < base));
3967 }
3968
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips flagged with the 40-bit DMA bug need the check,
	 * and only 64-bit highmem builds can produce such addresses.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3981
3982 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3983
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies @skb into a new linear skb, maps it, rewrites the descriptor
 * at *@start with the new mapping, and unmaps/cleans the original sw
 * ring entries up to @last_plus_one.  On success *@start is advanced
 * past the rewritten entry.  Returns 0 on success, -1 if the copy
 * failed or the new mapping also crosses a 4G boundary (the packet is
 * dropped in that case).  The original @skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; subsequent entries held
		 * the page fragments of the original skb.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* The first entry now owns the replacement skb
			 * (NULL if the workaround failed above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4041
4042 static void tg3_set_txd(struct tg3 *tp, int entry,
4043                         dma_addr_t mapping, int len, u32 flags,
4044                         u32 mss_and_is_end)
4045 {
4046         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4047         int is_end = (mss_and_is_end & 0x1);
4048         u32 mss = (mss_and_is_end >> 1);
4049         u32 vlan_tag = 0;
4050
4051         if (is_end)
4052                 flags |= TXD_FLAG_END;
4053         if (flags & TXD_FLAG_VLAN) {
4054                 vlan_tag = flags >> 16;
4055                 flags &= 0xffff;
4056         }
4057         vlan_tag |= (mss << TXD_MSS_SHIFT);
4058
4059         txd->addr_hi = ((u64) mapping >> 32);
4060         txd->addr_lo = ((u64) mapping & 0xffffffff);
4061         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4062         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4063 }
4064
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and every page fragment, writes one TX descriptor
 * per mapping, and kicks the hardware by writing the new producer
 * index to the send-host mailbox.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The IP/TCP headers are rewritten below, so make sure
		 * they are not shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Fold the L3/L4 header length into bits 9+ of mss for
		 * the descriptor; IPv6 uses the full header length past
		 * the Ethernet header.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed tot_len with the per-segment size and
			 * zero the checksums for the offload engine.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the head entry holds the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* No room for another worst-case packet: stop the
		 * queue, but wake it again if reclaim has already
		 * freed enough entries in the meantime.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4183
4184 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4185
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments @skb in software and transmits each segment through
 * tg3_start_xmit_dma_bug().  Consumes @skb except when returning
 * NETDEV_TX_BUSY.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; wake the queue again if
		 * reclaim freed enough room while we were stopping it.
		 */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so each resulting skb is small. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4218
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Same shape as tg3_start_xmit(), but every DMA mapping is checked
 * against the 4GB-crossing and >40-bit address bugs; if any mapping
 * would hit one, the whole packet is replayed through the bounce
 * workaround in tigon3_dma_hwbug_workaround().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers are rewritten below; make sure they are not
		 * shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Chips with TG3_FLG2_TSO_BUG cannot TSO frames whose
		 * headers exceed 80 bytes; punt those to software GSO.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			/* HW TSO: zero the TCP checksum and drop the
			 * CSUM flag — presumably the chip fills it in.
			 */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO needs a pseudo-header checksum. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode any extra IP/TCP header words (options) into
		 * the descriptor: into mss bits 11+ for HW TSO and
		 * 5705, into base_flags bits 12+ otherwise.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			/* Check each fragment mapping for both bugs. */
			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor of this packet and
		 * replace the whole chain with a linear bounce copy.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4392
4393 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4394                                int new_mtu)
4395 {
4396         dev->mtu = new_mtu;
4397
4398         if (new_mtu > ETH_DATA_LEN) {
4399                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4400                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4401                         ethtool_op_set_tso(dev, 0);
4402                 }
4403                 else
4404                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4405         } else {
4406                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4407                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4408                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4409         }
4410 }
4411
/* net_device change_mtu hook: validate the requested MTU and, if the
 * interface is up, halt and restart the hardware with the new size.
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Halt the chip before applying the new MTU and re-initializing. */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* On restart failure tg3_restart_hw() has already closed the
	 * device, so only restart networking on success.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4445
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same, with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each packet occupies one entry for the linear head
	 * plus one entry per page fragment; only the head entry holds
	 * the skb pointer, so step through fragment entries manually.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Fragment entries may wrap past the ring end. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4517
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one RX buffer of a
 * required ring could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips running jumbo MTU use jumbo-sized buffers
	 * in the standard ring instead of the separate jumbo ring.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  Partial failure
	 * shrinks the pending count rather than failing, as long as
	 * at least one buffer was posted.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4607
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases all coherent DMA rings and the sw ring-info arrays.
 * rx_std_buffers is one kzalloc'd block that also backs the jumbo
 * and tx ring-info arrays (see tg3_alloc_consistent()), so it is the
 * only kfree() needed.  Each pointer is NULLed after freeing so the
 * function is safe to call on a partially-allocated device.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4647
4648 /*
4649  * Must not be invoked with interrupt sources disabled and
4650  * the hardware shutdown down.  Can sleep.
4651  */
4652 static int tg3_alloc_consistent(struct tg3 *tp)
4653 {
4654         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4655                                       (TG3_RX_RING_SIZE +
4656                                        TG3_RX_JUMBO_RING_SIZE)) +
4657                                      (sizeof(struct tx_ring_info) *
4658                                       TG3_TX_RING_SIZE),
4659                                      GFP_KERNEL);
4660         if (!tp->rx_std_buffers)
4661                 return -ENOMEM;
4662
4663         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4664         tp->tx_buffers = (struct tx_ring_info *)
4665                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4666
4667         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4668                                           &tp->rx_std_mapping);
4669         if (!tp->rx_std)
4670                 goto err_out;
4671
4672         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4673                                             &tp->rx_jumbo_mapping);
4674
4675         if (!tp->rx_jumbo)
4676                 goto err_out;
4677
4678         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4679                                           &tp->rx_rcb_mapping);
4680         if (!tp->rx_rcb)
4681                 goto err_out;
4682
4683         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4684                                            &tp->tx_desc_mapping);
4685         if (!tp->tx_ring)
4686                 goto err_out;
4687
4688         tp->hw_status = pci_alloc_consistent(tp->pdev,
4689                                              TG3_HW_STATUS_SIZE,
4690                                              &tp->status_mapping);
4691         if (!tp->hw_status)
4692                 goto err_out;
4693
4694         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4695                                             sizeof(struct tg3_hw_stats),
4696                                             &tp->stats_mapping);
4697         if (!tp->hw_stats)
4698                 goto err_out;
4699
4700         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4701         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4702
4703         return 0;
4704
4705 err_out:
4706         tg3_free_consistent(tp);
4707         return -ENOMEM;
4708 }
4709
4710 #define MAX_WAIT_CNT 1000
4711
4712 /* To stop a block, clear the enable bit and poll till it
4713  * clears.  tp->lock is held.
4714  */
4715 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4716 {
4717         unsigned int i;
4718         u32 val;
4719
4720         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4721                 switch (ofs) {
4722                 case RCVLSC_MODE:
4723                 case DMAC_MODE:
4724                 case MBFREE_MODE:
4725                 case BUFMGR_MODE:
4726                 case MEMARB_MODE:
4727                         /* We can't enable/disable these bits of the
4728                          * 5705/5750, just say success.
4729                          */
4730                         return 0;
4731
4732                 default:
4733                         break;
4734                 };
4735         }
4736
4737         val = tr32(ofs);
4738         val &= ~enable_bit;
4739         tw32_f(ofs, val);
4740
4741         for (i = 0; i < MAX_WAIT_CNT; i++) {
4742                 udelay(100);
4743                 val = tr32(ofs);
4744                 if ((val & enable_bit) == 0)
4745                         break;
4746         }
4747
4748         if (i == MAX_WAIT_CNT && !silent) {
4749                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4750                        "ofs=%lx enable_bit=%x\n",
4751                        ofs, enable_bit);
4752                 return -ENODEV;
4753         }
4754
4755         return 0;
4756 }
4757
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the RX path, the TX
 * path, then the host-facing blocks, and finally clear the status
 * and statistics blocks.  The stop order matters: receive producers
 * are halted before the blocks that consume from them.
 *
 * Returns 0 if every block stopped cleanly.  Individual stop failures
 * are OR-ed into err (including -ENODEV, so the accumulated value is
 * only meaningful as zero/non-zero, which is all callers test).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Stop the receive-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Stop the send-side blocks and the DMA engines. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Poll up to MAX_WAIT_CNT * 100us for MAC TX to disable. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Stop the host coalescing, write DMA and buffer blocks last. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse-reset the FTQ (flow-through queues). */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear host-side status/stats now that the chip is quiet. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4820
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore.  The lock is
 * recursive within the driver: nvram_lock_cnt counts nested holders
 * and the hardware request is only issued on the first acquisition.
 * Waits up to 8000 * 20us (~160ms) for the grant; on timeout the
 * request is withdrawn and -ENODEV returned.  A no-op (success) on
 * parts without NVRAM.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                /* Timed out: withdraw the request. */
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}
4843
4844 /* tp->lock is held. */
4845 static void tg3_nvram_unlock(struct tg3 *tp)
4846 {
4847         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4848                 if (tp->nvram_lock_cnt > 0)
4849                         tp->nvram_lock_cnt--;
4850                 if (tp->nvram_lock_cnt == 0)
4851                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4852         }
4853 }
4854
4855 /* tp->lock is held. */
4856 static void tg3_enable_nvram_access(struct tg3 *tp)
4857 {
4858         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4859             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4860                 u32 nvaccess = tr32(NVRAM_ACCESS);
4861
4862                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4863         }
4864 }
4865
4866 /* tp->lock is held. */
4867 static void tg3_disable_nvram_access(struct tg3 *tp)
4868 {
4869         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4870             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4871                 u32 nvaccess = tr32(NVRAM_ACCESS);
4872
4873                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4874         }
4875 }
4876
/* Post an event to the APE (management firmware) and ring its doorbell.
 *
 * Silently bails out if the APE shared-memory signature or firmware
 * status is not as expected.  Otherwise, under the APE memory lock,
 * writes the event into TG3_APE_EVENT_STATUS once the previous event's
 * pending bit has cleared, retrying up to 10 times (~1ms total).
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Previous event consumed: post ours (still holding the
                 * lock), with the pending bit set for the APE to clear.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Only ring the doorbell if the event was actually posted. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4912
4913 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4914 {
4915         u32 event;
4916         u32 apedata;
4917
4918         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4919                 return;
4920
4921         switch (kind) {
4922                 case RESET_KIND_INIT:
4923                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4924                                         APE_HOST_SEG_SIG_MAGIC);
4925                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4926                                         APE_HOST_SEG_LEN_MAGIC);
4927                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4928                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4929                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4930                                         APE_HOST_DRIVER_ID_MAGIC);
4931                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4932                                         APE_HOST_BEHAV_NO_PHYLOCK);
4933
4934                         event = APE_EVENT_STATUS_STATE_START;
4935                         break;
4936                 case RESET_KIND_SHUTDOWN:
4937                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4938                         break;
4939                 case RESET_KIND_SUSPEND:
4940                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4941                         break;
4942                 default:
4943                         return;
4944         }
4945
4946         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4947
4948         tg3_ape_send_event(tp, event);
4949 }
4950
4951 /* tp->lock is held. */
4952 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4953 {
4954         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4955                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4956
4957         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4958                 switch (kind) {
4959                 case RESET_KIND_INIT:
4960                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4961                                       DRV_STATE_START);
4962                         break;
4963
4964                 case RESET_KIND_SHUTDOWN:
4965                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4966                                       DRV_STATE_UNLOAD);
4967                         break;
4968
4969                 case RESET_KIND_SUSPEND:
4970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4971                                       DRV_STATE_SUSPEND);
4972                         break;
4973
4974                 default:
4975                         break;
4976                 };
4977         }
4978
4979         if (kind == RESET_KIND_INIT ||
4980             kind == RESET_KIND_SUSPEND)
4981                 tg3_ape_driver_state_change(tp, kind);
4982 }
4983
4984 /* tp->lock is held. */
4985 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4986 {
4987         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4988                 switch (kind) {
4989                 case RESET_KIND_INIT:
4990                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4991                                       DRV_STATE_START_DONE);
4992                         break;
4993
4994                 case RESET_KIND_SHUTDOWN:
4995                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4996                                       DRV_STATE_UNLOAD_DONE);
4997                         break;
4998
4999                 default:
5000                         break;
5001                 };
5002         }
5003
5004         if (kind == RESET_KIND_SHUTDOWN)
5005                 tg3_ape_driver_state_change(tp, kind);
5006 }
5007
5008 /* tp->lock is held. */
5009 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5010 {
5011         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5012                 switch (kind) {
5013                 case RESET_KIND_INIT:
5014                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5015                                       DRV_STATE_START);
5016                         break;
5017
5018                 case RESET_KIND_SHUTDOWN:
5019                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5020                                       DRV_STATE_UNLOAD);
5021                         break;
5022
5023                 case RESET_KIND_SUSPEND:
5024                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5025                                       DRV_STATE_SUSPEND);
5026                         break;
5027
5028                 default:
5029                         break;
5030                 };
5031         }
5032 }
5033
/* Wait for the on-chip firmware to finish initializing after a reset.
 *
 * 5906 parts: poll the VCPU status for INIT_DONE, up to 200 * 100us
 * (20ms); timing out is an error (-ENODEV).
 *
 * All other parts: poll the firmware mailbox for the inverted magic
 * value, up to 100000 * 10us (1s).  A timeout here is NOT an error --
 * some boards (e.g. Sun onboard parts) legitimately run no firmware --
 * so it is reported once via printk and 0 is returned.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
5072
/* Save PCI command register before chip reset.
 * The core-clock reset can clear the memory-enable bit in PCI_COMMAND
 * on some chips; tg3_restore_pci_state() writes this value back.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5078
/* Restore PCI state after chip reset.
 *
 * Re-establishes the PCI configuration that the core-clock reset
 * clobbers: indirect register access, PCISTATE, the saved PCI_COMMAND
 * word, cache-line size / latency timer (non-PCIe only), the PCI-X
 * relaxed-ordering bit, and the MSI enable bit on 5780-class parts.
 * Order matters: indirect access must come back first.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Put back the PCI_COMMAND word saved by tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }
        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5137
5138 static void tg3_stop_fw(struct tg3 *);
5139
/* tp->lock is held.
 *
 * Perform a full core-clock reset of the chip and bring it back to a
 * usable (but unconfigured) state.  The sequence is highly order- and
 * timing-sensitive: PCI state is saved before the reset, the readl()
 * write-flush workaround is suspended across it, and the irq handler
 * is fenced off while the PCI memory-enable bit may be clear.
 * Returns 0 on success or a negative error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* NOTE(review): 0x7e2c is an undocumented PCIe-related
                 * register; value/meaning taken on faith from the
                 * original sequence.
                 */
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Signal a driver reset to the 5906 VCPU and let it run. */
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* PCI access is back; let the irq handler run again. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode for serdes parts. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5328
/* tp->lock is held.
 *
 * Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the
 * command mailbox, raise the RX CPU driver-event bit (1 << 14), and
 * wait up to 100us for the firmware to acknowledge by clearing it.
 * A no-op unless ASF is enabled and the APE is not in control.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
           !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
                u32 val;
                int i;

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
                val = tr32(GRC_RX_CPU_EVENT);
                val |= (1 << 14);
                tw32(GRC_RX_CPU_EVENT, val);

                /* Wait for RX cpu to ACK the event.  */
                for (i = 0; i < 100; i++) {
                        if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
                                break;
                        udelay(1);
                }
        }
}
5350
/* tp->lock is held.
 *
 * Full shutdown/reset: pause the firmware, signal the pre-reset state,
 * quiesce the hardware, reset the chip, then signal the post-reset
 * state.  Returns the result of tg3_chip_reset() (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp, kind);
        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
5371
/* Layout of the bundled MIPS firmware image that follows (tg3FwText):
 * version triple, then the load addresses/lengths of its .text,
 * .rodata, .data, .sbss and .bss sections in the chip's internal
 * address space.
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a long-standing
 * typo; left as-is since the name may be referenced elsewhere.
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
5386
5387 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5388         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5389         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5390         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5391         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5392         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5393         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5394         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5395         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5396         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5397         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5398         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5399         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5400         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5401         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5402         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5403         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5404         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5405         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5406         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5407         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5408         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5409         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5410         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5411         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5412         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5413         0, 0, 0, 0, 0, 0,
5414         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5415         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5416         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5417         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5418         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5419         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5420         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5421         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5422         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5423         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5424         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5425         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5426         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5427         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5428         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5429         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5430         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5431         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5432         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5433         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5434         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5435         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5436         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5437         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5438         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5439         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5440         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5441         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5442         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5443         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5444         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5445         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5446         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5447         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5448         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5449         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5450         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5451         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5452         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5453         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5454         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5455         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5456         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5457         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5458         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5459         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5460         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5461         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5462         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5463         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5464         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5465         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5466         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5467         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5468         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5469         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5470         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5471         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5472         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5473         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5474         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5475         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5476         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5477         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5478         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5479 };
5480
5481 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5482         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5483         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5484         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5485         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5486         0x00000000
5487 };
5488
5489 #if 0 /* All zeros, don't eat up space with it. */
5490 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5491         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5492         0x00000000, 0x00000000, 0x00000000, 0x00000000
5493 };
5494 #endif
5495
5496 #define RX_CPU_SCRATCH_BASE     0x30000
5497 #define RX_CPU_SCRATCH_SIZE     0x04000
5498 #define TX_CPU_SCRATCH_BASE     0x34000
5499 #define TX_CPU_SCRATCH_SIZE     0x04000
5500
5501 /* tp->lock is held. */
5502 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5503 {
5504         int i;
5505
5506         BUG_ON(offset == TX_CPU_BASE &&
5507             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5508
5509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5510                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5511
5512                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5513                 return 0;
5514         }
5515         if (offset == RX_CPU_BASE) {
5516                 for (i = 0; i < 10000; i++) {
5517                         tw32(offset + CPU_STATE, 0xffffffff);
5518                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5519                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5520                                 break;
5521                 }
5522
5523                 tw32(offset + CPU_STATE, 0xffffffff);
5524                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5525                 udelay(10);
5526         } else {
5527                 for (i = 0; i < 10000; i++) {
5528                         tw32(offset + CPU_STATE, 0xffffffff);
5529                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5530                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5531                                 break;
5532                 }
5533         }
5534
5535         if (i >= 10000) {
5536                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5537                        "and %s CPU\n",
5538                        tp->dev->name,
5539                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5540                 return -ENODEV;
5541         }
5542
5543         /* Clear firmware's nvram arbitration. */
5544         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5545                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5546         return 0;
5547 }
5548
5549 struct fw_info {
5550         unsigned int text_base;
5551         unsigned int text_len;
5552         const u32 *text_data;
5553         unsigned int rodata_base;
5554         unsigned int rodata_len;
5555         const u32 *rodata_data;
5556         unsigned int data_base;
5557         unsigned int data_len;
5558         const u32 *data_data;
5559 };
5560
5561 /* tp->lock is held. */
5562 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5563                                  int cpu_scratch_size, struct fw_info *info)
5564 {
5565         int err, lock_err, i;
5566         void (*write_op)(struct tg3 *, u32, u32);
5567
5568         if (cpu_base == TX_CPU_BASE &&
5569             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5570                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5571                        "TX cpu firmware on %s which is 5705.\n",
5572                        tp->dev->name);
5573                 return -EINVAL;
5574         }
5575
5576         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5577                 write_op = tg3_write_mem;
5578         else
5579                 write_op = tg3_write_indirect_reg32;
5580
5581         /* It is possible that bootcode is still loading at this point.
5582          * Get the nvram lock first before halting the cpu.
5583          */
5584         lock_err = tg3_nvram_lock(tp);
5585         err = tg3_halt_cpu(tp, cpu_base);
5586         if (!lock_err)
5587                 tg3_nvram_unlock(tp);
5588         if (err)
5589                 goto out;
5590
5591         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5592                 write_op(tp, cpu_scratch_base + i, 0);
5593         tw32(cpu_base + CPU_STATE, 0xffffffff);
5594         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5595         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5596                 write_op(tp, (cpu_scratch_base +
5597                               (info->text_base & 0xffff) +
5598                               (i * sizeof(u32))),
5599                          (info->text_data ?
5600                           info->text_data[i] : 0));
5601         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5602                 write_op(tp, (cpu_scratch_base +
5603                               (info->rodata_base & 0xffff) +
5604                               (i * sizeof(u32))),
5605                          (info->rodata_data ?
5606                           info->rodata_data[i] : 0));
5607         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5608                 write_op(tp, (cpu_scratch_base +
5609                               (info->data_base & 0xffff) +
5610                               (i * sizeof(u32))),
5611                          (info->data_data ?
5612                           info->data_data[i] : 0));
5613
5614         err = 0;
5615
5616 out:
5617         return err;
5618 }
5619
5620 /* tp->lock is held. */
5621 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5622 {
5623         struct fw_info info;
5624         int err, i;
5625
5626         info.text_base = TG3_FW_TEXT_ADDR;
5627         info.text_len = TG3_FW_TEXT_LEN;
5628         info.text_data = &tg3FwText[0];
5629         info.rodata_base = TG3_FW_RODATA_ADDR;
5630         info.rodata_len = TG3_FW_RODATA_LEN;
5631         info.rodata_data = &tg3FwRodata[0];
5632         info.data_base = TG3_FW_DATA_ADDR;
5633         info.data_len = TG3_FW_DATA_LEN;
5634         info.data_data = NULL;
5635
5636         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5637                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5638                                     &info);
5639         if (err)
5640                 return err;
5641
5642         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5643                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5644                                     &info);
5645         if (err)
5646                 return err;
5647
5648         /* Now startup only the RX cpu. */
5649         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5650         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5651
5652         for (i = 0; i < 5; i++) {
5653                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5654                         break;
5655                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5656                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5657                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5658                 udelay(1000);
5659         }
5660         if (i >= 5) {
5661                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5662                        "to set RX CPU PC, is %08x should be %08x\n",
5663                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5664                        TG3_FW_TEXT_ADDR);
5665                 return -ENODEV;
5666         }
5667         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5668         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5669
5670         return 0;
5671 }
5672
5673
5674 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5675 #define TG3_TSO_FW_RELASE_MINOR         0x6
5676 #define TG3_TSO_FW_RELEASE_FIX          0x0
5677 #define TG3_TSO_FW_START_ADDR           0x08000000
5678 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5679 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5680 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5681 #define TG3_TSO_FW_RODATA_LEN           0x60
5682 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5683 #define TG3_TSO_FW_DATA_LEN             0x30
5684 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5685 #define TG3_TSO_FW_SBSS_LEN             0x2c
5686 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5687 #define TG3_TSO_FW_BSS_LEN              0x894
5688
5689 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5690         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5691         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5692         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5693         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5694         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5695         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5696         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5697         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5698         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5699         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5700         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,