2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
61 #define TG3_TSO_SUPPORT 1
65 #define DRV_MODULE_NAME "tg3"
66 #define PFX DRV_MODULE_NAME ": "
67 #define DRV_MODULE_VERSION "3.83"
68 #define DRV_MODULE_RELDATE "October 10, 2007"
70 #define TG3_DEF_MAC_MODE 0
71 #define TG3_DEF_RX_MODE 0
72 #define TG3_DEF_TX_MODE 0
73 #define TG3_DEF_MSG_ENABLE \
83 /* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
86 #define TG3_TX_TIMEOUT (5 * HZ)
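/* For reference: HZ is the number of scheduler ticks (jiffies) per
 * second, so this timeout is five seconds regardless of the kernel's
 * configured tick rate, e.g. 5 * 250 = 1250 jiffies when HZ is 250.
 */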
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU 60
90 #define TG3_MAX_MTU(tp) \
91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
97 #define TG3_RX_RING_SIZE 512
98 #define TG3_DEF_RX_RING_PENDING 200
99 #define TG3_RX_JUMBO_RING_SIZE 256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
102 /* Do not place this n-ring entries value into the tp struct itself;
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
108 #define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
111 #define TG3_TX_RING_SIZE 512
112 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
114 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
122 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
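/* NEXT_TX() is the '& (foo - 1)' idiom described above: because
 * TG3_TX_RING_SIZE is a power of two, masking with (size - 1) is
 * equivalent to '% TG3_TX_RING_SIZE' but compiles to a single AND,
 * e.g. NEXT_TX(511) == 0 wraps the index back to the ring start.
 */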
124 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
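/* With the default tx_pending of TG3_DEF_TX_RING_PENDING (511), the
 * stopped TX queue is not woken until roughly 511 / 4 = 127
 * descriptors are free again, which avoids restarting the queue for
 * every single reclaimed descriptor.
 */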
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
133 #define TG3_NUM_TEST 6
135 static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION);
143 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug, int, 0);
145 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147 static struct pci_device_id tg3_pci_tbl[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
211 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
215 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217 static const struct {
218 const char string[ETH_GSTRING_LEN];
219 } ethtool_stats_keys[TG3_NUM_STATS] = {
222 { "rx_ucast_packets" },
223 { "rx_mcast_packets" },
224 { "rx_bcast_packets" },
226 { "rx_align_errors" },
227 { "rx_xon_pause_rcvd" },
228 { "rx_xoff_pause_rcvd" },
229 { "rx_mac_ctrl_rcvd" },
230 { "rx_xoff_entered" },
231 { "rx_frame_too_long_errors" },
233 { "rx_undersize_packets" },
234 { "rx_in_length_errors" },
235 { "rx_out_length_errors" },
236 { "rx_64_or_less_octet_packets" },
237 { "rx_65_to_127_octet_packets" },
238 { "rx_128_to_255_octet_packets" },
239 { "rx_256_to_511_octet_packets" },
240 { "rx_512_to_1023_octet_packets" },
241 { "rx_1024_to_1522_octet_packets" },
242 { "rx_1523_to_2047_octet_packets" },
243 { "rx_2048_to_4095_octet_packets" },
244 { "rx_4096_to_8191_octet_packets" },
245 { "rx_8192_to_9022_octet_packets" },
252 { "tx_flow_control" },
254 { "tx_single_collisions" },
255 { "tx_mult_collisions" },
257 { "tx_excessive_collisions" },
258 { "tx_late_collisions" },
259 { "tx_collide_2times" },
260 { "tx_collide_3times" },
261 { "tx_collide_4times" },
262 { "tx_collide_5times" },
263 { "tx_collide_6times" },
264 { "tx_collide_7times" },
265 { "tx_collide_8times" },
266 { "tx_collide_9times" },
267 { "tx_collide_10times" },
268 { "tx_collide_11times" },
269 { "tx_collide_12times" },
270 { "tx_collide_13times" },
271 { "tx_collide_14times" },
272 { "tx_collide_15times" },
273 { "tx_ucast_packets" },
274 { "tx_mcast_packets" },
275 { "tx_bcast_packets" },
276 { "tx_carrier_sense_errors" },
280 { "dma_writeq_full" },
281 { "dma_write_prioq_full" },
285 { "rx_threshold_hit" },
287 { "dma_readq_full" },
288 { "dma_read_prioq_full" },
289 { "tx_comp_queue_full" },
291 { "ring_set_send_prod_index" },
292 { "ring_status_update" },
294 { "nic_avoided_irqs" },
295 { "nic_tx_threshold_hit" }
298 static const struct {
299 const char string[ETH_GSTRING_LEN];
300 } ethtool_test_keys[TG3_NUM_TEST] = {
301 { "nvram test (online) " },
302 { "link test (online) " },
303 { "register test (offline)" },
304 { "memory test (offline)" },
305 { "loopback test (offline)" },
306 { "interrupt test (offline)" },
309 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 writel(val, tp->regs + off);
314 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 return (readl(tp->regs + off));
319 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 writel(val, tp->aperegs + off);
324 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 return (readl(tp->aperegs + off));
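/* Indirect register access: rather than MMIO, the target offset is
 * written to the TG3PCI_REG_BASE_ADDR window in PCI config space and
 * the data then moves through TG3PCI_REG_DATA, with indirect_lock
 * keeping the two config-space accesses paired.  A minimal sketch of
 * a read through the same window (this mirrors the code below):
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 */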
329 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
333 spin_lock_irqsave(&tp->indirect_lock, flags);
334 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
336 spin_unlock_irqrestore(&tp->indirect_lock, flags);
339 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
341 writel(val, tp->regs + off);
342 readl(tp->regs + off);
345 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
350 spin_lock_irqsave(&tp->indirect_lock, flags);
351 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
352 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
353 spin_unlock_irqrestore(&tp->indirect_lock, flags);
357 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
361 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
362 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
363 TG3_64BIT_REG_LOW, val);
366 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
367 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
368 TG3_64BIT_REG_LOW, val);
372 spin_lock_irqsave(&tp->indirect_lock, flags);
373 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
375 spin_unlock_irqrestore(&tp->indirect_lock, flags);
377 /* In indirect mode when disabling interrupts, we also need
378 * to clear the interrupt bit in the GRC local ctrl register.
380 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
382 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
383 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
387 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
392 spin_lock_irqsave(&tp->indirect_lock, flags);
393 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
394 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395 spin_unlock_irqrestore(&tp->indirect_lock, flags);
399 /* usec_wait specifies the wait time in usec when writing to certain registers
400 * where it is unsafe to read back the register without some delay.
401 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
402 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
406 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
407 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
408 /* Non-posted methods */
409 tp->write32(tp, off, val);
412 tg3_write32(tp, off, val);
417 /* Wait again after the read for the posted method to guarantee that
418 * the wait time is met.
424 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
426 tp->write32_mbox(tp, off, val);
427 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
428 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
429 tp->read32_mbox(tp, off);
432 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
434 void __iomem *mbox = tp->regs + off;
436 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
438 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
442 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 return (readl(tp->regs + off + GRCMBOX_BASE));
447 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 writel(val, tp->regs + off + GRCMBOX_BASE);
452 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
453 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
454 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
455 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
456 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
458 #define tw32(reg,val) tp->write32(tp, reg, val)
459 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
460 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
461 #define tr32(reg) tp->read32(tp, reg)
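/* Usage of the accessors above, as seen throughout this file:
 *	tw32(reg, val)            - plain write through tp->write32
 *	tw32_f(reg, val)          - write, then read back to flush the
 *	                            posted write (usec_wait of 0)
 *	tw32_wait_f(reg, val, us) - flush and additionally wait 'us'
 *	                            microseconds, e.g.
 *	                            tw32_wait_f(TG3PCI_CLOCK_CTRL,
 *	                                        clock_ctrl, 40)
 *	                            in tg3_switch_clocks() below.
 */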
463 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
467 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
468 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
471 spin_lock_irqsave(&tp->indirect_lock, flags);
472 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
473 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
476 /* Always leave this as zero. */
477 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
479 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
480 tw32_f(TG3PCI_MEM_WIN_DATA, val);
482 /* Always leave this as zero. */
483 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
485 spin_unlock_irqrestore(&tp->indirect_lock, flags);
488 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
492 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
493 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
498 spin_lock_irqsave(&tp->indirect_lock, flags);
499 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
500 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
501 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
503 /* Always leave this as zero. */
504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
507 *val = tr32(TG3PCI_MEM_WIN_DATA);
509 /* Always leave this as zero. */
510 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 static void tg3_ape_lock_init(struct tg3 *tp)
519 /* Make sure the driver isn't holding any stale locks. */
520 for (i = 0; i < 8; i++)
521 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
522 APE_LOCK_GRANT_DRIVER);
525 static int tg3_ape_lock(struct tg3 *tp, int locknum)
531 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
535 case TG3_APE_LOCK_MEM:
543 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545 /* Wait for up to 1 millisecond to acquire lock. */
546 for (i = 0; i < 100; i++) {
547 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
548 if (status == APE_LOCK_GRANT_DRIVER)
553 if (status != APE_LOCK_GRANT_DRIVER) {
554 /* Revoke the lock request. */
555 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
556 APE_LOCK_GRANT_DRIVER);
564 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
568 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
572 case TG3_APE_LOCK_MEM:
579 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
582 static void tg3_disable_ints(struct tg3 *tp)
584 tw32(TG3PCI_MISC_HOST_CTRL,
585 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
586 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
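/* Interrupts are masked in two places: MISC_HOST_CTRL gates the PCI
 * interrupt line, and writing 0x1 to the interrupt mailbox tells the
 * chip the host is still processing.  tg3_enable_ints() below undoes
 * both and rewrites the mailbox with (last_tag << 24), low bit clear,
 * so that tagged-status chips know which status block the host has
 * already seen.
 */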
589 static inline void tg3_cond_int(struct tg3 *tp)
591 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
592 (tp->hw_status->status & SD_STATUS_UPDATED))
593 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
595 tw32(HOSTCC_MODE, tp->coalesce_mode |
596 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
599 static void tg3_enable_ints(struct tg3 *tp)
604 tw32(TG3PCI_MISC_HOST_CTRL,
605 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
606 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
607 (tp->last_tag << 24));
608 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
609 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
610 (tp->last_tag << 24));
614 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 struct tg3_hw_status *sblk = tp->hw_status;
617 unsigned int work_exists = 0;
619 /* check for phy events */
620 if (!(tp->tg3_flags &
621 (TG3_FLAG_USE_LINKCHG_REG |
622 TG3_FLAG_POLL_SERDES))) {
623 if (sblk->status & SD_STATUS_LINK_CHG)
626 /* check for RX/TX work to do */
627 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
628 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
635 * similar to tg3_enable_ints, but it accurately determines whether there
636 * is new work pending and can return without flushing the PIO write
637 * which reenables interrupts
639 static void tg3_restart_ints(struct tg3 *tp)
641 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
645 /* When doing tagged status, this work check is unnecessary.
646 * The last_tag we write above tells the chip which piece of
647 * work we've completed.
649 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
651 tw32(HOSTCC_MODE, tp->coalesce_mode |
652 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
655 static inline void tg3_netif_stop(struct tg3 *tp)
657 tp->dev->trans_start = jiffies; /* prevent tx timeout */
658 napi_disable(&tp->napi);
659 netif_tx_disable(tp->dev);
662 static inline void tg3_netif_start(struct tg3 *tp)
664 netif_wake_queue(tp->dev);
665 /* NOTE: unconditional netif_wake_queue is only appropriate
666 * so long as all callers are assured to have free tx slots
667 * (such as after tg3_init_hw)
669 napi_enable(&tp->napi);
670 tp->hw_status->status |= SD_STATUS_UPDATED;
674 static void tg3_switch_clocks(struct tg3 *tp)
676 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
679 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
680 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
683 orig_clock_ctrl = clock_ctrl;
684 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
685 CLOCK_CTRL_CLKRUN_OENABLE |
687 tp->pci_clock_ctrl = clock_ctrl;
689 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
690 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
691 tw32_wait_f(TG3PCI_CLOCK_CTRL,
692 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
694 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
695 tw32_wait_f(TG3PCI_CLOCK_CTRL,
697 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
699 tw32_wait_f(TG3PCI_CLOCK_CTRL,
700 clock_ctrl | (CLOCK_CTRL_ALTCLK),
703 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
706 #define PHY_BUSY_LOOPS 5000
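/* PHY_BUSY_LOOPS bounds the MI_COM_BUSY polling below.  tg3_readphy()
 * and tg3_writephy() drive a standard MII management (clause 22 MDIO)
 * transaction: the PHY address, register address and a READ or WRITE
 * command are packed into MAC_MI_COM, the frame is started, and
 * MI_COM_BUSY is polled until the MAC reports completion.  Typical
 * caller pattern, as used later in this file:
 *
 *	u32 bmsr;
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 */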
708 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
714 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
716 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
722 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
723 MI_COM_PHY_ADDR_MASK);
724 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
725 MI_COM_REG_ADDR_MASK);
726 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
728 tw32_f(MAC_MI_COM, frame_val);
730 loops = PHY_BUSY_LOOPS;
733 frame_val = tr32(MAC_MI_COM);
735 if ((frame_val & MI_COM_BUSY) == 0) {
737 frame_val = tr32(MAC_MI_COM);
745 *val = frame_val & MI_COM_DATA_MASK;
749 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
750 tw32_f(MAC_MI_MODE, tp->mi_mode);
757 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
764 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
767 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
769 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
773 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
774 MI_COM_PHY_ADDR_MASK);
775 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
776 MI_COM_REG_ADDR_MASK);
777 frame_val |= (val & MI_COM_DATA_MASK);
778 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
780 tw32_f(MAC_MI_COM, frame_val);
782 loops = PHY_BUSY_LOOPS;
785 frame_val = tr32(MAC_MI_COM);
786 if ((frame_val & MI_COM_BUSY) == 0) {
788 frame_val = tr32(MAC_MI_COM);
798 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
799 tw32_f(MAC_MI_MODE, tp->mi_mode);
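/* tg3_phy_toggle_automdix() below enables or disables automatic
 * MDI/MDI-X crossover.  On 5906 parts this goes through the EPHY
 * shadow registers (MII_TG3_EPHY_TEST / EPHYTST_MISCCTRL); on the
 * other 5705+ copper PHYs it uses the AUX_CTRL "misc" shadow page and
 * sets the write-enable bit before writing the new setting back.
 */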
806 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
810 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
811 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
817 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
818 tg3_writephy(tp, MII_TG3_EPHY_TEST,
819 ephy | MII_TG3_EPHY_SHADOW_EN);
820 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
822 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
824 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
825 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
827 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
830 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
831 MII_TG3_AUXCTL_SHDWSEL_MISC;
832 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
833 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
835 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
837 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
838 phy |= MII_TG3_AUXCTL_MISC_WREN;
839 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
844 static void tg3_phy_set_wirespeed(struct tg3 *tp)
848 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
851 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
852 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
853 tg3_writephy(tp, MII_TG3_AUX_CTRL,
854 (val | (1 << 15) | (1 << 4)));
857 static int tg3_bmcr_reset(struct tg3 *tp)
862 /* OK, reset it, and poll the BMCR_RESET bit until it
863 * clears or we time out.
865 phy_control = BMCR_RESET;
866 err = tg3_writephy(tp, MII_BMCR, phy_control);
872 err = tg3_readphy(tp, MII_BMCR, &phy_control);
876 if ((phy_control & BMCR_RESET) == 0) {
888 static int tg3_wait_macro_done(struct tg3 *tp)
895 if (!tg3_readphy(tp, 0x16, &tmp32)) {
896 if ((tmp32 & 0x1000) == 0)
906 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
908 static const u32 test_pat[4][6] = {
909 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
910 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
911 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
912 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
916 for (chan = 0; chan < 4; chan++) {
919 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
920 (chan * 0x2000) | 0x0200);
921 tg3_writephy(tp, 0x16, 0x0002);
923 for (i = 0; i < 6; i++)
924 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
927 tg3_writephy(tp, 0x16, 0x0202);
928 if (tg3_wait_macro_done(tp)) {
933 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
934 (chan * 0x2000) | 0x0200);
935 tg3_writephy(tp, 0x16, 0x0082);
936 if (tg3_wait_macro_done(tp)) {
941 tg3_writephy(tp, 0x16, 0x0802);
942 if (tg3_wait_macro_done(tp)) {
947 for (i = 0; i < 6; i += 2) {
950 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
951 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
952 tg3_wait_macro_done(tp)) {
958 if (low != test_pat[chan][i] ||
959 high != test_pat[chan][i+1]) {
960 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
961 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
962 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
972 static int tg3_phy_reset_chanpat(struct tg3 *tp)
976 for (chan = 0; chan < 4; chan++) {
979 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
980 (chan * 0x2000) | 0x0200);
981 tg3_writephy(tp, 0x16, 0x0002);
982 for (i = 0; i < 6; i++)
983 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
984 tg3_writephy(tp, 0x16, 0x0202);
985 if (tg3_wait_macro_done(tp))
992 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
994 u32 reg32, phy9_orig;
995 int retries, do_phy_reset, err;
1001 err = tg3_bmcr_reset(tp);
1007 /* Disable transmitter and interrupt. */
1008 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1012 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1014 /* Set full-duplex, 1000 mbps. */
1015 tg3_writephy(tp, MII_BMCR,
1016 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1018 /* Set to master mode. */
1019 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1022 tg3_writephy(tp, MII_TG3_CTRL,
1023 (MII_TG3_CTRL_AS_MASTER |
1024 MII_TG3_CTRL_ENABLE_AS_MASTER));
1026 /* Enable SM_DSP_CLOCK and 6dB. */
1027 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1029 /* Block the PHY control access. */
1030 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1031 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1033 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1036 } while (--retries);
1038 err = tg3_phy_reset_chanpat(tp);
1042 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1043 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1045 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1046 tg3_writephy(tp, 0x16, 0x0000);
1048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1050 /* Set Extended packet length bit for jumbo frames */
1051 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1054 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1057 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1059 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1061 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1068 static void tg3_link_report(struct tg3 *);
1070 /* This will reset the tigon3 PHY if there is no valid
1071 * link unless the FORCE argument is non-zero.
1073 static int tg3_phy_reset(struct tg3 *tp)
1078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1081 val = tr32(GRC_MISC_CFG);
1082 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1085 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1086 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1090 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1091 netif_carrier_off(tp->dev);
1092 tg3_link_report(tp);
1095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1096 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1098 err = tg3_phy_reset_5703_4_5(tp);
1104 err = tg3_bmcr_reset(tp);
1109 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1110 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1111 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1112 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1113 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1114 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1115 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1117 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1118 tg3_writephy(tp, 0x1c, 0x8d68);
1119 tg3_writephy(tp, 0x1c, 0x8d68);
1121 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1122 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1123 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1124 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1125 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1126 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1127 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1128 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1129 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1131 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1132 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1133 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1134 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1135 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1136 tg3_writephy(tp, MII_TG3_TEST1,
1137 MII_TG3_TEST1_TRIM_EN | 0x4);
1139 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1140 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1142 /* Set Extended packet length bit (bit 14) on all chips that */
1143 /* support jumbo frames */
1144 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1145 /* Cannot do read-modify-write on 5401 */
1146 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1147 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1150 /* Set bit 14 with read-modify-write to preserve other bits */
1151 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1152 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1153 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1156 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1157 * jumbo frames transmission.
1159 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1162 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1163 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1164 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1168 /* adjust output voltage */
1169 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1172 tg3_phy_toggle_automdix(tp, 1);
1173 tg3_phy_set_wirespeed(tp);
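/* tg3_frob_aux_power() manages the GRC GPIO outputs that switch the
 * board's auxiliary (Vaux) power.  On dual-port devices (5704/5714)
 * it also consults the peer function, so aux power stays configured
 * for wake-up if either port still needs it for WOL or ASF; otherwise
 * the GPIOs are programmed back to the low-power configuration.
 */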
1177 static void tg3_frob_aux_power(struct tg3 *tp)
1179 struct tg3 *tp_peer = tp;
1181 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1184 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1185 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1186 struct net_device *dev_peer;
1188 dev_peer = pci_get_drvdata(tp->pdev_peer);
1189 /* remove_one() may have been run on the peer. */
1193 tp_peer = netdev_priv(dev_peer);
1196 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1197 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1198 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1199 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1202 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1203 (GRC_LCLCTRL_GPIO_OE0 |
1204 GRC_LCLCTRL_GPIO_OE1 |
1205 GRC_LCLCTRL_GPIO_OE2 |
1206 GRC_LCLCTRL_GPIO_OUTPUT0 |
1207 GRC_LCLCTRL_GPIO_OUTPUT1),
1211 u32 grc_local_ctrl = 0;
1213 if (tp_peer != tp &&
1214 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1217 /* Workaround to prevent overdrawing Amps. */
1218 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1220 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1221 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1222 grc_local_ctrl, 100);
1225 /* On 5753 and variants, GPIO2 cannot be used. */
1226 no_gpio2 = tp->nic_sram_data_cfg &
1227 NIC_SRAM_DATA_CFG_NO_GPIO2;
1229 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1230 GRC_LCLCTRL_GPIO_OE1 |
1231 GRC_LCLCTRL_GPIO_OE2 |
1232 GRC_LCLCTRL_GPIO_OUTPUT1 |
1233 GRC_LCLCTRL_GPIO_OUTPUT2;
1235 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1236 GRC_LCLCTRL_GPIO_OUTPUT2);
1238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1239 grc_local_ctrl, 100);
1241 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1243 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1244 grc_local_ctrl, 100);
1247 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1249 grc_local_ctrl, 100);
1253 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1254 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1255 if (tp_peer != tp &&
1256 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1259 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1260 (GRC_LCLCTRL_GPIO_OE1 |
1261 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1263 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1264 GRC_LCLCTRL_GPIO_OE1, 100);
1266 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1267 (GRC_LCLCTRL_GPIO_OE1 |
1268 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1273 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1275 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1277 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1278 if (speed != SPEED_10)
1280 } else if (speed == SPEED_10)
1286 static int tg3_setup_phy(struct tg3 *, int);
1288 #define RESET_KIND_SHUTDOWN 0
1289 #define RESET_KIND_INIT 1
1290 #define RESET_KIND_SUSPEND 2
1292 static void tg3_write_sig_post_reset(struct tg3 *, int);
1293 static int tg3_halt_cpu(struct tg3 *, u32);
1294 static int tg3_nvram_lock(struct tg3 *);
1295 static void tg3_nvram_unlock(struct tg3 *);
1297 static void tg3_power_down_phy(struct tg3 *tp)
1299 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1301 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1302 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1305 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1306 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1307 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1316 val = tr32(GRC_MISC_CFG);
1317 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1321 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1322 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1323 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1326 /* The PHY should not be powered down on some chips because
1327 * of bugs.
1328 */
1329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1330 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1331 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1332 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1334 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
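/* tg3_set_power_state() sequences a PCI power-state change: it keeps
 * register access working via MISC_HOST_CTRL, saves the current link
 * config (and, on copper devices, renegotiates down to 10Mb), builds
 * a minimal MAC_MODE for Wake-on-LAN (magic-packet reception is only
 * enabled when WOL is enabled and the device reports PME from D3cold),
 * leaves the receiver running so WOL frames are still seen, gates the
 * core clocks, and only then writes the new power_control value.
 */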
1337 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1340 u16 power_control, power_caps;
1341 int pm = tp->pm_cap;
1343 /* Make sure register accesses (indirect or otherwise)
1344 * will function correctly.
1346 pci_write_config_dword(tp->pdev,
1347 TG3PCI_MISC_HOST_CTRL,
1348 tp->misc_host_ctrl);
1350 pci_read_config_word(tp->pdev,
1353 power_control |= PCI_PM_CTRL_PME_STATUS;
1354 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1358 pci_write_config_word(tp->pdev,
1361 udelay(100); /* Delay after power state change */
1363 /* Switch out of Vaux if it is a NIC */
1364 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1365 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1382 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1383 "requested.\n",
1384 tp->dev->name, state);
1388 power_control |= PCI_PM_CTRL_PME_ENABLE;
1390 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1391 tw32(TG3PCI_MISC_HOST_CTRL,
1392 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1394 if (tp->link_config.phy_is_low_power == 0) {
1395 tp->link_config.phy_is_low_power = 1;
1396 tp->link_config.orig_speed = tp->link_config.speed;
1397 tp->link_config.orig_duplex = tp->link_config.duplex;
1398 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1401 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1402 tp->link_config.speed = SPEED_10;
1403 tp->link_config.duplex = DUPLEX_HALF;
1404 tp->link_config.autoneg = AUTONEG_ENABLE;
1405 tg3_setup_phy(tp, 0);
1408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1411 val = tr32(GRC_VCPU_EXT_CTRL);
1412 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1413 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1417 for (i = 0; i < 200; i++) {
1418 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1419 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1424 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1425 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1426 WOL_DRV_STATE_SHUTDOWN |
1430 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1432 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1435 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1436 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1439 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1440 mac_mode = MAC_MODE_PORT_MODE_GMII;
1442 mac_mode = MAC_MODE_PORT_MODE_MII;
1444 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1445 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1447 u32 speed = (tp->tg3_flags &
1448 TG3_FLAG_WOL_SPEED_100MB) ?
1449 SPEED_100 : SPEED_10;
1450 if (tg3_5700_link_polarity(tp, speed))
1451 mac_mode |= MAC_MODE_LINK_POLARITY;
1453 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1456 mac_mode = MAC_MODE_PORT_MODE_TBI;
1459 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1460 tw32(MAC_LED_CTRL, tp->led_ctrl);
1462 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1463 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1464 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1466 tw32_f(MAC_MODE, mac_mode);
1469 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1473 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1474 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1478 base_val = tp->pci_clock_ctrl;
1479 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1480 CLOCK_CTRL_TXCLK_DISABLE);
1482 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1483 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1484 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1485 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1486 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1488 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1489 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1490 u32 newbits1, newbits2;
1492 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1493 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1494 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1495 CLOCK_CTRL_TXCLK_DISABLE |
1497 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1498 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1499 newbits1 = CLOCK_CTRL_625_CORE;
1500 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1502 newbits1 = CLOCK_CTRL_ALTCLK;
1503 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1506 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1509 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1512 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1517 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1518 CLOCK_CTRL_TXCLK_DISABLE |
1519 CLOCK_CTRL_44MHZ_CORE);
1521 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1524 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1525 tp->pci_clock_ctrl | newbits3, 40);
1529 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1530 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1531 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1532 tg3_power_down_phy(tp);
1534 tg3_frob_aux_power(tp);
1536 /* Workaround for unstable PLL clock */
1537 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1538 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1539 u32 val = tr32(0x7d00);
1541 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1543 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1546 err = tg3_nvram_lock(tp);
1547 tg3_halt_cpu(tp, RX_CPU_BASE);
1549 tg3_nvram_unlock(tp);
1553 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1555 /* Finally, set the new power state. */
1556 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1557 udelay(100); /* Delay after power state change */
1562 static void tg3_link_report(struct tg3 *tp)
1564 if (!netif_carrier_ok(tp->dev)) {
1565 if (netif_msg_link(tp))
1566 printk(KERN_INFO PFX "%s: Link is down.\n",
1568 } else if (netif_msg_link(tp)) {
1569 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1571 (tp->link_config.active_speed == SPEED_1000 ?
1573 (tp->link_config.active_speed == SPEED_100 ?
1575 (tp->link_config.active_duplex == DUPLEX_FULL ?
1578 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1581 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1582 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1586 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1588 u32 new_tg3_flags = 0;
1589 u32 old_rx_mode = tp->rx_mode;
1590 u32 old_tx_mode = tp->tx_mode;
1592 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1594 /* Convert 1000BaseX flow control bits to 1000BaseT
1595 * bits before resolving flow control.
1597 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1598 local_adv &= ~(ADVERTISE_PAUSE_CAP |
1599 ADVERTISE_PAUSE_ASYM);
1600 remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1602 if (local_adv & ADVERTISE_1000XPAUSE)
1603 local_adv |= ADVERTISE_PAUSE_CAP;
1604 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1605 local_adv |= ADVERTISE_PAUSE_ASYM;
1606 if (remote_adv & LPA_1000XPAUSE)
1607 remote_adv |= LPA_PAUSE_CAP;
1608 if (remote_adv & LPA_1000XPAUSE_ASYM)
1609 remote_adv |= LPA_PAUSE_ASYM;
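/* The resolution below follows the usual 802.3 pause priority rules:
 * both sides advertising symmetric pause enables RX and TX pause;
 * advertising only asymmetric pause against a partner that advertises
 * both yields TX pause only; a symmetric-capable local side facing an
 * asymmetric-only partner keeps RX pause only.
 */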
1612 if (local_adv & ADVERTISE_PAUSE_CAP) {
1613 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1614 if (remote_adv & LPA_PAUSE_CAP)
1616 (TG3_FLAG_RX_PAUSE |
1618 else if (remote_adv & LPA_PAUSE_ASYM)
1620 (TG3_FLAG_RX_PAUSE);
1622 if (remote_adv & LPA_PAUSE_CAP)
1624 (TG3_FLAG_RX_PAUSE |
1627 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1628 if ((remote_adv & LPA_PAUSE_CAP) &&
1629 (remote_adv & LPA_PAUSE_ASYM))
1630 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1633 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1634 tp->tg3_flags |= new_tg3_flags;
1636 new_tg3_flags = tp->tg3_flags;
1639 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1640 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1642 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1644 if (old_rx_mode != tp->rx_mode) {
1645 tw32_f(MAC_RX_MODE, tp->rx_mode);
1648 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1649 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1651 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1653 if (old_tx_mode != tp->tx_mode) {
1654 tw32_f(MAC_TX_MODE, tp->tx_mode);
1658 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1660 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1661 case MII_TG3_AUX_STAT_10HALF:
1663 *duplex = DUPLEX_HALF;
1666 case MII_TG3_AUX_STAT_10FULL:
1668 *duplex = DUPLEX_FULL;
1671 case MII_TG3_AUX_STAT_100HALF:
1673 *duplex = DUPLEX_HALF;
1676 case MII_TG3_AUX_STAT_100FULL:
1678 *duplex = DUPLEX_FULL;
1681 case MII_TG3_AUX_STAT_1000HALF:
1682 *speed = SPEED_1000;
1683 *duplex = DUPLEX_HALF;
1686 case MII_TG3_AUX_STAT_1000FULL:
1687 *speed = SPEED_1000;
1688 *duplex = DUPLEX_FULL;
1692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1693 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1695 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1699 *speed = SPEED_INVALID;
1700 *duplex = DUPLEX_INVALID;
1705 static void tg3_phy_copper_begin(struct tg3 *tp)
1710 if (tp->link_config.phy_is_low_power) {
1711 /* Entering low power mode. Disable gigabit and
1712 * 100baseT advertisements.
1714 tg3_writephy(tp, MII_TG3_CTRL, 0);
1716 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1717 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1718 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1719 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1721 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1722 } else if (tp->link_config.speed == SPEED_INVALID) {
1723 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1724 tp->link_config.advertising &=
1725 ~(ADVERTISED_1000baseT_Half |
1726 ADVERTISED_1000baseT_Full);
1728 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1729 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1730 new_adv |= ADVERTISE_10HALF;
1731 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1732 new_adv |= ADVERTISE_10FULL;
1733 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1734 new_adv |= ADVERTISE_100HALF;
1735 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1736 new_adv |= ADVERTISE_100FULL;
1737 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1739 if (tp->link_config.advertising &
1740 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1742 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1743 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1744 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1745 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1746 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1747 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1748 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1749 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1750 MII_TG3_CTRL_ENABLE_AS_MASTER);
1751 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1753 tg3_writephy(tp, MII_TG3_CTRL, 0);
1756 /* Asking for a specific link mode. */
1757 if (tp->link_config.speed == SPEED_1000) {
1758 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1759 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761 if (tp->link_config.duplex == DUPLEX_FULL)
1762 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1764 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1765 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1766 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1767 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1768 MII_TG3_CTRL_ENABLE_AS_MASTER);
1769 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1771 tg3_writephy(tp, MII_TG3_CTRL, 0);
1773 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1774 if (tp->link_config.speed == SPEED_100) {
1775 if (tp->link_config.duplex == DUPLEX_FULL)
1776 new_adv |= ADVERTISE_100FULL;
1778 new_adv |= ADVERTISE_100HALF;
1780 if (tp->link_config.duplex == DUPLEX_FULL)
1781 new_adv |= ADVERTISE_10FULL;
1783 new_adv |= ADVERTISE_10HALF;
1785 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1789 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1790 tp->link_config.speed != SPEED_INVALID) {
1791 u32 bmcr, orig_bmcr;
1793 tp->link_config.active_speed = tp->link_config.speed;
1794 tp->link_config.active_duplex = tp->link_config.duplex;
1797 switch (tp->link_config.speed) {
1803 bmcr |= BMCR_SPEED100;
1807 bmcr |= TG3_BMCR_SPEED1000;
1811 if (tp->link_config.duplex == DUPLEX_FULL)
1812 bmcr |= BMCR_FULLDPLX;
1814 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1815 (bmcr != orig_bmcr)) {
1816 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1817 for (i = 0; i < 1500; i++) {
1821 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1822 tg3_readphy(tp, MII_BMSR, &tmp))
1824 if (!(tmp & BMSR_LSTATUS)) {
1829 tg3_writephy(tp, MII_BMCR, bmcr);
1833 tg3_writephy(tp, MII_BMCR,
1834 BMCR_ANENABLE | BMCR_ANRESTART);
1838 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1842 /* Turn off tap power management. */
1843 /* Set Extended packet length bit */
1844 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1846 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1847 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1849 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1850 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1852 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1853 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1855 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1856 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1858 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1859 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1866 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1868 u32 adv_reg, all_mask = 0;
1870 if (mask & ADVERTISED_10baseT_Half)
1871 all_mask |= ADVERTISE_10HALF;
1872 if (mask & ADVERTISED_10baseT_Full)
1873 all_mask |= ADVERTISE_10FULL;
1874 if (mask & ADVERTISED_100baseT_Half)
1875 all_mask |= ADVERTISE_100HALF;
1876 if (mask & ADVERTISED_100baseT_Full)
1877 all_mask |= ADVERTISE_100FULL;
1879 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1882 if ((adv_reg & all_mask) != all_mask)
1884 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1888 if (mask & ADVERTISED_1000baseT_Half)
1889 all_mask |= ADVERTISE_1000HALF;
1890 if (mask & ADVERTISED_1000baseT_Full)
1891 all_mask |= ADVERTISE_1000FULL;
1893 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1896 if ((tg3_ctrl & all_mask) != all_mask)
1902 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1904 int current_link_up;
1913 (MAC_STATUS_SYNC_CHANGED |
1914 MAC_STATUS_CFG_CHANGED |
1915 MAC_STATUS_MI_COMPLETION |
1916 MAC_STATUS_LNKSTATE_CHANGED));
1919 tp->mi_mode = MAC_MI_MODE_BASE;
1920 tw32_f(MAC_MI_MODE, tp->mi_mode);
1923 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1925 /* Some third-party PHYs need to be reset on link going
1926 * down.
1927 */
1928 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1929 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1930 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1931 netif_carrier_ok(tp->dev)) {
1932 tg3_readphy(tp, MII_BMSR, &bmsr);
1933 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1934 !(bmsr & BMSR_LSTATUS))
1940 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1941 tg3_readphy(tp, MII_BMSR, &bmsr);
1942 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1943 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1946 if (!(bmsr & BMSR_LSTATUS)) {
1947 err = tg3_init_5401phy_dsp(tp);
1951 tg3_readphy(tp, MII_BMSR, &bmsr);
1952 for (i = 0; i < 1000; i++) {
1954 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1955 (bmsr & BMSR_LSTATUS)) {
1961 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1962 !(bmsr & BMSR_LSTATUS) &&
1963 tp->link_config.active_speed == SPEED_1000) {
1964 err = tg3_phy_reset(tp);
1966 err = tg3_init_5401phy_dsp(tp);
1971 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1972 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1973 /* 5701 {A0,B0} CRC bug workaround */
1974 tg3_writephy(tp, 0x15, 0x0a75);
1975 tg3_writephy(tp, 0x1c, 0x8c68);
1976 tg3_writephy(tp, 0x1c, 0x8d68);
1977 tg3_writephy(tp, 0x1c, 0x8c68);
1980 /* Clear pending interrupts... */
1981 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1982 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1984 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1985 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1986 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
1987 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1991 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1992 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1993 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1995 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1998 current_link_up = 0;
1999 current_speed = SPEED_INVALID;
2000 current_duplex = DUPLEX_INVALID;
2002 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2005 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2006 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2007 if (!(val & (1 << 10))) {
2009 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2015 for (i = 0; i < 100; i++) {
2016 tg3_readphy(tp, MII_BMSR, &bmsr);
2017 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2018 (bmsr & BMSR_LSTATUS))
2023 if (bmsr & BMSR_LSTATUS) {
2026 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2027 for (i = 0; i < 2000; i++) {
2029 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2034 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2039 for (i = 0; i < 200; i++) {
2040 tg3_readphy(tp, MII_BMCR, &bmcr);
2041 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2043 if (bmcr && bmcr != 0x7fff)
2048 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2049 if (bmcr & BMCR_ANENABLE) {
2050 current_link_up = 1;
2052 /* Force autoneg restart if we are exiting
2053 * low power mode.
2054 */
2055 if (!tg3_copper_is_advertising_all(tp,
2056 tp->link_config.advertising))
2057 current_link_up = 0;
2059 current_link_up = 0;
2062 if (!(bmcr & BMCR_ANENABLE) &&
2063 tp->link_config.speed == current_speed &&
2064 tp->link_config.duplex == current_duplex) {
2065 current_link_up = 1;
2067 current_link_up = 0;
2071 tp->link_config.active_speed = current_speed;
2072 tp->link_config.active_duplex = current_duplex;
2075 if (current_link_up == 1 &&
2076 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2077 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2078 u32 local_adv, remote_adv;
2080 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2082 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2084 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2087 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2089 /* If we are not advertising full pause capability,
2090 * something is wrong. Bring the link down and reconfigure.
2092 if (local_adv != ADVERTISE_PAUSE_CAP) {
2093 current_link_up = 0;
2095 tg3_setup_flow_control(tp, local_adv, remote_adv);
2099 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2102 tg3_phy_copper_begin(tp);
2104 tg3_readphy(tp, MII_BMSR, &tmp);
2105 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2106 (tmp & BMSR_LSTATUS))
2107 current_link_up = 1;
2110 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2111 if (current_link_up == 1) {
2112 if (tp->link_config.active_speed == SPEED_100 ||
2113 tp->link_config.active_speed == SPEED_10)
2114 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2116 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2118 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2120 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2121 if (tp->link_config.active_duplex == DUPLEX_HALF)
2122 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2124 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2125 if (current_link_up == 1 &&
2126 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2127 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2129 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2132 /* ??? Without this setting Netgear GA302T PHY does not
2133 * ??? send/receive packets...
2135 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2136 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2137 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2138 tw32_f(MAC_MI_MODE, tp->mi_mode);
2142 tw32_f(MAC_MODE, tp->mac_mode);
2145 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2146 /* Polled via timer. */
2147 tw32_f(MAC_EVENT, 0);
2149 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2154 current_link_up == 1 &&
2155 tp->link_config.active_speed == SPEED_1000 &&
2156 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2157 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2160 (MAC_STATUS_SYNC_CHANGED |
2161 MAC_STATUS_CFG_CHANGED));
2164 NIC_SRAM_FIRMWARE_MBOX,
2165 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2168 if (current_link_up != netif_carrier_ok(tp->dev)) {
2169 if (current_link_up)
2170 netif_carrier_on(tp->dev);
2172 netif_carrier_off(tp->dev);
2173 tg3_link_report(tp);
2179 struct tg3_fiber_aneginfo {
2181 #define ANEG_STATE_UNKNOWN 0
2182 #define ANEG_STATE_AN_ENABLE 1
2183 #define ANEG_STATE_RESTART_INIT 2
2184 #define ANEG_STATE_RESTART 3
2185 #define ANEG_STATE_DISABLE_LINK_OK 4
2186 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2187 #define ANEG_STATE_ABILITY_DETECT 6
2188 #define ANEG_STATE_ACK_DETECT_INIT 7
2189 #define ANEG_STATE_ACK_DETECT 8
2190 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2191 #define ANEG_STATE_COMPLETE_ACK 10
2192 #define ANEG_STATE_IDLE_DETECT_INIT 11
2193 #define ANEG_STATE_IDLE_DETECT 12
2194 #define ANEG_STATE_LINK_OK 13
2195 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2196 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2199 #define MR_AN_ENABLE 0x00000001
2200 #define MR_RESTART_AN 0x00000002
2201 #define MR_AN_COMPLETE 0x00000004
2202 #define MR_PAGE_RX 0x00000008
2203 #define MR_NP_LOADED 0x00000010
2204 #define MR_TOGGLE_TX 0x00000020
2205 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2206 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2207 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2208 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2209 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2210 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2211 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2212 #define MR_TOGGLE_RX 0x00002000
2213 #define MR_NP_RX 0x00004000
2215 #define MR_LINK_OK 0x80000000
2217 unsigned long link_time, cur_time;
2219 u32 ability_match_cfg;
2220 int ability_match_count;
2222 char ability_match, idle_match, ack_match;
2224 u32 txconfig, rxconfig;
2225 #define ANEG_CFG_NP 0x00000080
2226 #define ANEG_CFG_ACK 0x00000040
2227 #define ANEG_CFG_RF2 0x00000020
2228 #define ANEG_CFG_RF1 0x00000010
2229 #define ANEG_CFG_PS2 0x00000001
2230 #define ANEG_CFG_PS1 0x00008000
2231 #define ANEG_CFG_HD 0x00004000
2232 #define ANEG_CFG_FD 0x00002000
2233 #define ANEG_CFG_INVAL 0x00001f06
2235 };
2236 #define ANEG_OK 0
2237 #define ANEG_DONE 1
2238 #define ANEG_TIMER_ENAB 2
2239 #define ANEG_FAILED -1
2241 #define ANEG_STATE_SETTLE_TIME 10000
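/* Advance the software autoneg state machine by one step: sample the
 * received config word from MAC_RX_AUTO_NEG, update the ability/ack/idle
 * match tracking in *ap, and return ANEG_OK, ANEG_TIMER_ENAB, ANEG_DONE
 * or ANEG_FAILED.
 */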
2243 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2244 struct tg3_fiber_aneginfo *ap)
2245 {
2246 unsigned long delta;
2247 u32 rx_cfg_reg;
2248 int ret;
2250 if (ap->state == ANEG_STATE_UNKNOWN) {
2251 ap->rxconfig = 0;
2252 ap->link_time = 0;
2253 ap->cur_time = 0;
2254 ap->ability_match_cfg = 0;
2255 ap->ability_match_count = 0;
2256 ap->ability_match = 0;
2257 ap->idle_match = 0;
2258 ap->ack_match = 0;
2259 }
2260 ap->cur_time++;
2262 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2263 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2265 if (rx_cfg_reg != ap->ability_match_cfg) {
2266 ap->ability_match_cfg = rx_cfg_reg;
2267 ap->ability_match = 0;
2268 ap->ability_match_count = 0;
2269 } else {
2270 if (++ap->ability_match_count > 1) {
2271 ap->ability_match = 1;
2272 ap->ability_match_cfg = rx_cfg_reg;
2273 }
2274 }
2275 if (rx_cfg_reg & ANEG_CFG_ACK)
2276 ap->ack_match = 1;
2277 else
2278 ap->ack_match = 0;
2280 ap->idle_match = 0;
2281 } else {
2282 ap->idle_match = 1;
2283 ap->ability_match_cfg = 0;
2284 ap->ability_match_count = 0;
2285 ap->ability_match = 0;
2286 ap->ack_match = 0;
2288 rx_cfg_reg = 0;
2289 }
2291 ap->rxconfig = rx_cfg_reg;
2292 ret = ANEG_OK;
2294 switch (ap->state) {
2295 case ANEG_STATE_UNKNOWN:
2296 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2297 ap->state = ANEG_STATE_AN_ENABLE;
2300 case ANEG_STATE_AN_ENABLE:
2301 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2302 if (ap->flags & MR_AN_ENABLE) {
2305 ap->ability_match_cfg = 0;
2306 ap->ability_match_count = 0;
2307 ap->ability_match = 0;
2311 ap->state = ANEG_STATE_RESTART_INIT;
2312 } else {
2313 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2314 }
2315 break;
2317 case ANEG_STATE_RESTART_INIT:
2318 ap->link_time = ap->cur_time;
2319 ap->flags &= ~(MR_NP_LOADED);
2321 tw32(MAC_TX_AUTO_NEG, 0);
2322 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2323 tw32_f(MAC_MODE, tp->mac_mode);
2326 ret = ANEG_TIMER_ENAB;
2327 ap->state = ANEG_STATE_RESTART;
2330 case ANEG_STATE_RESTART:
2331 delta = ap->cur_time - ap->link_time;
2332 if (delta > ANEG_STATE_SETTLE_TIME) {
2333 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2334 } else {
2335 ret = ANEG_TIMER_ENAB;
2336 }
2337 break;
2339 case ANEG_STATE_DISABLE_LINK_OK:
2340 ret = ANEG_DONE;
2341 break;
2343 case ANEG_STATE_ABILITY_DETECT_INIT:
2344 ap->flags &= ~(MR_TOGGLE_TX);
2345 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2346 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2347 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2348 tw32_f(MAC_MODE, tp->mac_mode);
2351 ap->state = ANEG_STATE_ABILITY_DETECT;
2352 break;
2354 case ANEG_STATE_ABILITY_DETECT:
2355 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2356 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2357 }
2358 break;
2360 case ANEG_STATE_ACK_DETECT_INIT:
2361 ap->txconfig |= ANEG_CFG_ACK;
2362 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2363 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2364 tw32_f(MAC_MODE, tp->mac_mode);
2367 ap->state = ANEG_STATE_ACK_DETECT;
2370 case ANEG_STATE_ACK_DETECT:
2371 if (ap->ack_match != 0) {
2372 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2373 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2374 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2375 } else {
2376 ap->state = ANEG_STATE_AN_ENABLE;
2377 }
2378 } else if (ap->ability_match != 0 &&
2379 ap->rxconfig == 0) {
2380 ap->state = ANEG_STATE_AN_ENABLE;
2381 }
2382 break;
2384 case ANEG_STATE_COMPLETE_ACK_INIT:
2385 if (ap->rxconfig & ANEG_CFG_INVAL) {
2386 ret = ANEG_FAILED;
2387 break;
2388 }
2389 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2390 MR_LP_ADV_HALF_DUPLEX |
2391 MR_LP_ADV_SYM_PAUSE |
2392 MR_LP_ADV_ASYM_PAUSE |
2393 MR_LP_ADV_REMOTE_FAULT1 |
2394 MR_LP_ADV_REMOTE_FAULT2 |
2395 MR_LP_ADV_NEXT_PAGE |
2396 MR_TOGGLE_RX |
2397 MR_NP_RX);
2398 if (ap->rxconfig & ANEG_CFG_FD)
2399 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2400 if (ap->rxconfig & ANEG_CFG_HD)
2401 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2402 if (ap->rxconfig & ANEG_CFG_PS1)
2403 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2404 if (ap->rxconfig & ANEG_CFG_PS2)
2405 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2406 if (ap->rxconfig & ANEG_CFG_RF1)
2407 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2408 if (ap->rxconfig & ANEG_CFG_RF2)
2409 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2410 if (ap->rxconfig & ANEG_CFG_NP)
2411 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2413 ap->link_time = ap->cur_time;
2415 ap->flags ^= (MR_TOGGLE_TX);
2416 if (ap->rxconfig & 0x0008)
2417 ap->flags |= MR_TOGGLE_RX;
2418 if (ap->rxconfig & ANEG_CFG_NP)
2419 ap->flags |= MR_NP_RX;
2420 ap->flags |= MR_PAGE_RX;
2422 ap->state = ANEG_STATE_COMPLETE_ACK;
2423 ret = ANEG_TIMER_ENAB;
2424 break;
2426 case ANEG_STATE_COMPLETE_ACK:
2427 if (ap->ability_match != 0 &&
2428 ap->rxconfig == 0) {
2429 ap->state = ANEG_STATE_AN_ENABLE;
2430 break;
2431 }
2432 delta = ap->cur_time - ap->link_time;
2433 if (delta > ANEG_STATE_SETTLE_TIME) {
2434 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2435 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2436 } else {
2437 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2438 !(ap->flags & MR_NP_RX)) {
2439 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2440 } else {
2441 ret = ANEG_FAILED;
2442 }
2443 }
2444 }
2445 break;
2447 case ANEG_STATE_IDLE_DETECT_INIT:
2448 ap->link_time = ap->cur_time;
2449 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2450 tw32_f(MAC_MODE, tp->mac_mode);
2453 ap->state = ANEG_STATE_IDLE_DETECT;
2454 ret = ANEG_TIMER_ENAB;
2457 case ANEG_STATE_IDLE_DETECT:
2458 if (ap->ability_match != 0 &&
2459 ap->rxconfig == 0) {
2460 ap->state = ANEG_STATE_AN_ENABLE;
2461 break;
2462 }
2463 delta = ap->cur_time - ap->link_time;
2464 if (delta > ANEG_STATE_SETTLE_TIME) {
2465 /* XXX another gem from the Broadcom driver :( */
2466 ap->state = ANEG_STATE_LINK_OK;
2467 }
2468 break;
2470 case ANEG_STATE_LINK_OK:
2471 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2472 ret = ANEG_DONE;
2473 break;
2475 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2476 /* ??? unimplemented */
2477 break;
2479 case ANEG_STATE_NEXT_PAGE_WAIT:
2480 /* ??? unimplemented */
2481 break;
2483 default:
2484 ret = ANEG_FAILED;
2485 break;
2486 }
2488 return ret;
2489 }
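/* Run the software autoneg state machine to completion (bounded at 195000
 * iterations) and return nonzero when a usable full-duplex link partner
 * was negotiated. The negotiated MR_* bits are returned through *flags.
 */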
2491 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2492 {
2493 int res = 0;
2494 struct tg3_fiber_aneginfo aninfo;
2495 int status = ANEG_FAILED;
2496 unsigned int tick;
2497 u32 tmp;
2499 tw32_f(MAC_TX_AUTO_NEG, 0);
2501 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2502 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2505 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2508 memset(&aninfo, 0, sizeof(aninfo));
2509 aninfo.flags |= MR_AN_ENABLE;
2510 aninfo.state = ANEG_STATE_UNKNOWN;
2511 aninfo.cur_time = 0;
2512 tick = 0;
2513 while (++tick < 195000) {
2514 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2515 if (status == ANEG_DONE || status == ANEG_FAILED)
2516 break;
2518 udelay(1);
2519 }
2521 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2522 tw32_f(MAC_MODE, tp->mac_mode);
2525 *flags = aninfo.flags;
2527 if (status == ANEG_DONE &&
2528 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2529 MR_LP_ADV_FULL_DUPLEX)))
2530 res = 1;
2532 return res;
2533 }
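/* One-time setup for the BCM8002 SerDes PHY: program the PLL lock range,
 * issue a soft reset, pulse POR and leave the channel register deselected
 * so the PHY ID remains readable.
 */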
2535 static void tg3_init_bcm8002(struct tg3 *tp)
2536 {
2537 u32 mac_status = tr32(MAC_STATUS);
2538 int i;
2540 /* Reset when initting first time or we have a link. */
2541 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2542 !(mac_status & MAC_STATUS_PCS_SYNCED))
2543 return;
2545 /* Set PLL lock range. */
2546 tg3_writephy(tp, 0x16, 0x8007);
2549 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2551 /* Wait for reset to complete. */
2552 /* XXX schedule_timeout() ... */
2553 for (i = 0; i < 500; i++)
2554 udelay(10);
2556 /* Config mode; select PMA/Ch 1 regs. */
2557 tg3_writephy(tp, 0x10, 0x8411);
2559 /* Enable auto-lock and comdet, select txclk for tx. */
2560 tg3_writephy(tp, 0x11, 0x0a10);
2562 tg3_writephy(tp, 0x18, 0x00a0);
2563 tg3_writephy(tp, 0x16, 0x41ff);
2565 /* Assert and deassert POR. */
2566 tg3_writephy(tp, 0x13, 0x0400);
2568 tg3_writephy(tp, 0x13, 0x0000);
2570 tg3_writephy(tp, 0x11, 0x0a50);
2572 tg3_writephy(tp, 0x11, 0x0a10);
2574 /* Wait for signal to stabilize */
2575 /* XXX schedule_timeout() ... */
2576 for (i = 0; i < 15000; i++)
2577 udelay(10);
2579 /* Deselect the channel register so we can read the PHYID
2580 * later.
2581 */
2582 tg3_writephy(tp, 0x10, 0x8011);
2583 }
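/* Fiber link bring-up using the on-chip SG_DIG hardware autoneg block.
 * Falls back to parallel detection when the partner sends no config code
 * words. Returns nonzero if the link came up.
 */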
2585 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2586 {
2587 u32 sg_dig_ctrl, sg_dig_status;
2588 u32 serdes_cfg, expected_sg_dig_ctrl;
2589 int workaround, port_a;
2590 int current_link_up;
2592 serdes_cfg = 0;
2593 expected_sg_dig_ctrl = 0;
2594 workaround = 0;
2595 port_a = 1;
2596 current_link_up = 0;
2598 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2599 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2600 workaround = 1;
2601 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2602 port_a = 0;
2604 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2605 /* preserve bits 20-23 for voltage regulator */
2606 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2607 }
2609 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2611 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2612 if (sg_dig_ctrl & (1 << 31)) {
2613 if (workaround) {
2614 u32 val = serdes_cfg;
2616 if (port_a)
2617 val |= 0xc010000;
2618 else
2619 val |= 0x4010000;
2620 tw32_f(MAC_SERDES_CFG, val);
2621 }
2622 tw32_f(SG_DIG_CTRL, 0x01388400);
2623 }
2624 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2625 tg3_setup_flow_control(tp, 0, 0);
2626 current_link_up = 1;
2627 }
2628 goto out;
2629 }
2631 /* Want auto-negotiation. */
2632 expected_sg_dig_ctrl = 0x81388400;
2634 /* Pause capability */
2635 expected_sg_dig_ctrl |= (1 << 11);
2637 /* Asymmetric pause */
2638 expected_sg_dig_ctrl |= (1 << 12);
2640 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2641 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2642 tp->serdes_counter &&
2643 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2644 MAC_STATUS_RCVD_CFG)) ==
2645 MAC_STATUS_PCS_SYNCED)) {
2646 tp->serdes_counter--;
2647 current_link_up = 1;
2648 goto out;
2649 }
2650 restart_autoneg:
2651 if (workaround)
2652 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2653 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2655 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2657 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2658 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2659 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2660 MAC_STATUS_SIGNAL_DET)) {
2661 sg_dig_status = tr32(SG_DIG_STATUS);
2662 mac_status = tr32(MAC_STATUS);
2664 if ((sg_dig_status & (1 << 1)) &&
2665 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2666 u32 local_adv, remote_adv;
2668 local_adv = ADVERTISE_PAUSE_CAP;
2669 remote_adv = 0;
2670 if (sg_dig_status & (1 << 19))
2671 remote_adv |= LPA_PAUSE_CAP;
2672 if (sg_dig_status & (1 << 20))
2673 remote_adv |= LPA_PAUSE_ASYM;
2675 tg3_setup_flow_control(tp, local_adv, remote_adv);
2676 current_link_up = 1;
2677 tp->serdes_counter = 0;
2678 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2679 } else if (!(sg_dig_status & (1 << 1))) {
2680 if (tp->serdes_counter)
2681 tp->serdes_counter--;
2682 else {
2683 if (workaround) {
2684 u32 val = serdes_cfg;
2686 if (port_a)
2687 val |= 0xc010000;
2688 else
2689 val |= 0x4010000;
2691 tw32_f(MAC_SERDES_CFG, val);
2692 }
2694 tw32_f(SG_DIG_CTRL, 0x01388400);
2695 udelay(40);
2697 /* Link parallel detection - link is up */
2698 /* only if we have PCS_SYNC and not */
2699 /* receiving config code words */
2700 mac_status = tr32(MAC_STATUS);
2701 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2702 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2703 tg3_setup_flow_control(tp, 0, 0);
2704 current_link_up = 1;
2705 tp->tg3_flags2 |=
2706 TG3_FLG2_PARALLEL_DETECT;
2707 tp->serdes_counter =
2708 SERDES_PARALLEL_DET_TIMEOUT;
2709 } else
2710 goto restart_autoneg;
2711 }
2712 }
2713 } else {
2714 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2715 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2716 }
2718 out:
2719 return current_link_up;
2720 }
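/* Fiber link bring-up without the hardware autoneg block: either run the
 * software autoneg state machine or force a 1000FD link, then return
 * nonzero if the link is up.
 */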
2722 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2723 {
2724 int current_link_up = 0;
2726 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2727 goto out;
2729 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2730 u32 flags;
2731 int i;
2733 if (fiber_autoneg(tp, &flags)) {
2734 u32 local_adv, remote_adv;
2736 local_adv = ADVERTISE_PAUSE_CAP;
2737 remote_adv = 0;
2738 if (flags & MR_LP_ADV_SYM_PAUSE)
2739 remote_adv |= LPA_PAUSE_CAP;
2740 if (flags & MR_LP_ADV_ASYM_PAUSE)
2741 remote_adv |= LPA_PAUSE_ASYM;
2743 tg3_setup_flow_control(tp, local_adv, remote_adv);
2745 current_link_up = 1;
2746 }
2747 for (i = 0; i < 30; i++) {
2748 udelay(20);
2749 tw32_f(MAC_STATUS,
2750 (MAC_STATUS_SYNC_CHANGED |
2751 MAC_STATUS_CFG_CHANGED));
2753 if ((tr32(MAC_STATUS) &
2754 (MAC_STATUS_SYNC_CHANGED |
2755 MAC_STATUS_CFG_CHANGED)) == 0)
2756 break;
2757 }
2759 mac_status = tr32(MAC_STATUS);
2760 if (current_link_up == 0 &&
2761 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2762 !(mac_status & MAC_STATUS_RCVD_CFG))
2763 current_link_up = 1;
2764 } else {
2765 /* Forcing 1000FD link up. */
2766 current_link_up = 1;
2768 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2771 tw32_f(MAC_MODE, tp->mac_mode);
2772 udelay(40);
2773 }
2775 out:
2776 return current_link_up;
2777 }
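/* Top-level link setup for TBI/fiber ports: choose hardware or by-hand
 * autoneg, update active speed/duplex and the link LED, and report
 * carrier or flow-control changes.
 */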
2779 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2780 {
2781 u32 orig_pause_cfg;
2782 u16 orig_active_speed;
2783 u8 orig_active_duplex;
2784 u32 mac_status;
2785 int current_link_up;
2786 int i;
2788 orig_pause_cfg =
2789 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2790 TG3_FLAG_TX_PAUSE));
2791 orig_active_speed = tp->link_config.active_speed;
2792 orig_active_duplex = tp->link_config.active_duplex;
2794 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2795 netif_carrier_ok(tp->dev) &&
2796 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2797 mac_status = tr32(MAC_STATUS);
2798 mac_status &= (MAC_STATUS_PCS_SYNCED |
2799 MAC_STATUS_SIGNAL_DET |
2800 MAC_STATUS_CFG_CHANGED |
2801 MAC_STATUS_RCVD_CFG);
2802 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2803 MAC_STATUS_SIGNAL_DET)) {
2804 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2805 MAC_STATUS_CFG_CHANGED));
2806 return 0;
2807 }
2808 }
2810 tw32_f(MAC_TX_AUTO_NEG, 0);
2812 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2813 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2814 tw32_f(MAC_MODE, tp->mac_mode);
2817 if (tp->phy_id == PHY_ID_BCM8002)
2818 tg3_init_bcm8002(tp);
2820 /* Enable link change event even when serdes polling. */
2821 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2824 current_link_up = 0;
2825 mac_status = tr32(MAC_STATUS);
2827 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2828 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2829 else
2830 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2832 tp->hw_status->status =
2833 (SD_STATUS_UPDATED |
2834 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2836 for (i = 0; i < 100; i++) {
2837 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2838 MAC_STATUS_CFG_CHANGED));
2840 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2841 MAC_STATUS_CFG_CHANGED |
2842 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2843 break;
2844 }
2846 mac_status = tr32(MAC_STATUS);
2847 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2848 current_link_up = 0;
2849 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2850 tp->serdes_counter == 0) {
2851 tw32_f(MAC_MODE, (tp->mac_mode |
2852 MAC_MODE_SEND_CONFIGS));
2854 tw32_f(MAC_MODE, tp->mac_mode);
2855 }
2856 }
2858 if (current_link_up == 1) {
2859 tp->link_config.active_speed = SPEED_1000;
2860 tp->link_config.active_duplex = DUPLEX_FULL;
2861 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2862 LED_CTRL_LNKLED_OVERRIDE |
2863 LED_CTRL_1000MBPS_ON));
2864 } else {
2865 tp->link_config.active_speed = SPEED_INVALID;
2866 tp->link_config.active_duplex = DUPLEX_INVALID;
2867 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2868 LED_CTRL_LNKLED_OVERRIDE |
2869 LED_CTRL_TRAFFIC_OVERRIDE));
2870 }
2872 if (current_link_up != netif_carrier_ok(tp->dev)) {
2873 if (current_link_up)
2874 netif_carrier_on(tp->dev);
2875 else
2876 netif_carrier_off(tp->dev);
2877 tg3_link_report(tp);
2878 } else {
2879 u32 now_pause_cfg =
2880 tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2881 TG3_FLAG_TX_PAUSE);
2882 if (orig_pause_cfg != now_pause_cfg ||
2883 orig_active_speed != tp->link_config.active_speed ||
2884 orig_active_duplex != tp->link_config.active_duplex)
2885 tg3_link_report(tp);
2886 }
2888 return 0;
2889 }
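/* Link setup for SerDes parts that expose an MII-style register set
 * (1000BASE-X advertisement via MII_ADVERTISE). Handles autoneg, forced
 * mode and parallel detection, then programs the MAC duplex to match.
 */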
2891 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2892 {
2893 int current_link_up, err = 0;
2894 u32 bmsr, bmcr;
2895 u16 current_speed;
2896 u8 current_duplex;
2898 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2899 tw32_f(MAC_MODE, tp->mac_mode);
2904 tw32_f(MAC_STATUS,
2905 (MAC_STATUS_SYNC_CHANGED |
2906 MAC_STATUS_CFG_CHANGED |
2907 MAC_STATUS_MI_COMPLETION |
2908 MAC_STATUS_LNKSTATE_CHANGED));
2914 current_link_up = 0;
2915 current_speed = SPEED_INVALID;
2916 current_duplex = DUPLEX_INVALID;
2918 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2919 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2921 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2922 bmsr |= BMSR_LSTATUS;
2923 else
2924 bmsr &= ~BMSR_LSTATUS;
2925 }
2927 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2929 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2930 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2931 /* do nothing, just check for link up at the end */
2932 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2933 u32 adv, new_adv;
2935 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2936 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2937 ADVERTISE_1000XPAUSE |
2938 ADVERTISE_1000XPSE_ASYM |
2939 ADVERTISE_SLCT);
2941 /* Always advertise symmetric PAUSE just like copper */
2942 new_adv |= ADVERTISE_1000XPAUSE;
2944 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2945 new_adv |= ADVERTISE_1000XHALF;
2946 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2947 new_adv |= ADVERTISE_1000XFULL;
2949 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2950 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2952 tg3_writephy(tp, MII_BMCR, bmcr);
2954 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2955 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2956 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2958 return err;
2959 }
2960 } else {
2961 u32 new_bmcr;
2963 bmcr &= ~BMCR_SPEED1000;
2964 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2966 if (tp->link_config.duplex == DUPLEX_FULL)
2967 new_bmcr |= BMCR_FULLDPLX;
2969 if (new_bmcr != bmcr) {
2970 /* BMCR_SPEED1000 is a reserved bit that needs
2971 * to be set on write.
2972 */
2973 new_bmcr |= BMCR_SPEED1000;
2975 /* Force a linkdown */
2976 if (netif_carrier_ok(tp->dev)) {
2977 u32 adv;
2979 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2980 adv &= ~(ADVERTISE_1000XFULL |
2981 ADVERTISE_1000XHALF |
2982 ADVERTISE_SLCT);
2983 tg3_writephy(tp, MII_ADVERTISE, adv);
2984 tg3_writephy(tp, MII_BMCR, bmcr |
2985 BMCR_ANRESTART |
2986 BMCR_ANENABLE);
2987 udelay(10);
2988 netif_carrier_off(tp->dev);
2989 }
2990 tg3_writephy(tp, MII_BMCR, new_bmcr);
2991 bmcr = new_bmcr;
2992 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2993 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2995 ASIC_REV_5714) {
2996 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2997 bmsr |= BMSR_LSTATUS;
2998 else
2999 bmsr &= ~BMSR_LSTATUS;
3000 }
3001 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3002 }
3003 }
3005 if (bmsr & BMSR_LSTATUS) {
3006 current_speed = SPEED_1000;
3007 current_link_up = 1;
3008 if (bmcr & BMCR_FULLDPLX)
3009 current_duplex = DUPLEX_FULL;
3010 else
3011 current_duplex = DUPLEX_HALF;
3013 if (bmcr & BMCR_ANENABLE) {
3014 u32 local_adv, remote_adv, common;
3016 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3017 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3018 common = local_adv & remote_adv;
3019 if (common & (ADVERTISE_1000XHALF |
3020 ADVERTISE_1000XFULL)) {
3021 if (common & ADVERTISE_1000XFULL)
3022 current_duplex = DUPLEX_FULL;
3023 else
3024 current_duplex = DUPLEX_HALF;
3026 tg3_setup_flow_control(tp, local_adv,
3027 remote_adv);
3028 }
3029 else
3030 current_link_up = 0;
3031 }
3032 }
3034 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3035 if (tp->link_config.active_duplex == DUPLEX_HALF)
3036 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3038 tw32_f(MAC_MODE, tp->mac_mode);
3041 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3043 tp->link_config.active_speed = current_speed;
3044 tp->link_config.active_duplex = current_duplex;
3046 if (current_link_up != netif_carrier_ok(tp->dev)) {
3047 if (current_link_up)
3048 netif_carrier_on(tp->dev);
3049 else {
3050 netif_carrier_off(tp->dev);
3051 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3052 }
3053 tg3_link_report(tp);
3054 }
3055 return err;
3056 }
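/* Poll helper for MII SerDes ports: after the autoneg timer expires,
 * force the link up by parallel detection when signal is present but no
 * config code words are received, and re-enable autoneg once code words
 * reappear.
 */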
3058 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3059 {
3060 if (tp->serdes_counter) {
3061 /* Give autoneg time to complete. */
3062 tp->serdes_counter--;
3063 return;
3064 }
3065 if (!netif_carrier_ok(tp->dev) &&
3066 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3067 u32 bmcr;
3069 tg3_readphy(tp, MII_BMCR, &bmcr);
3070 if (bmcr & BMCR_ANENABLE) {
3071 u32 phy1, phy2;
3073 /* Select shadow register 0x1f */
3074 tg3_writephy(tp, 0x1c, 0x7c00);
3075 tg3_readphy(tp, 0x1c, &phy1);
3077 /* Select expansion interrupt status register */
3078 tg3_writephy(tp, 0x17, 0x0f01);
3079 tg3_readphy(tp, 0x15, &phy2);
3080 tg3_readphy(tp, 0x15, &phy2);
3082 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3083 /* We have signal detect and not receiving
3084 * config code words, link is up by parallel
3085 * detection.
3086 */
3088 bmcr &= ~BMCR_ANENABLE;
3089 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3090 tg3_writephy(tp, MII_BMCR, bmcr);
3091 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3092 }
3093 }
3094 }
3095 else if (netif_carrier_ok(tp->dev) &&
3096 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3097 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3098 u32 phy2;
3100 /* Select expansion interrupt status register */
3101 tg3_writephy(tp, 0x17, 0x0f01);
3102 tg3_readphy(tp, 0x15, &phy2);
3103 if (phy2 & 0x20) {
3104 u32 bmcr;
3106 /* Config code words received, turn on autoneg. */
3107 tg3_readphy(tp, MII_BMCR, &bmcr);
3108 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3110 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3112 }
3113 }
3114 }
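/* Dispatch link setup to the copper, fiber or MII-SerDes handler, then
 * adjust the transmit slot time for half-duplex gigabit, statistics
 * coalescing and, where applicable, the ASPM power management threshold.
 */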
3116 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3117 {
3118 int err;
3120 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3121 err = tg3_setup_fiber_phy(tp, force_reset);
3122 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3123 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3124 } else {
3125 err = tg3_setup_copper_phy(tp, force_reset);
3126 }
3128 if (tp->link_config.active_speed == SPEED_1000 &&
3129 tp->link_config.active_duplex == DUPLEX_HALF)
3130 tw32(MAC_TX_LENGTHS,
3131 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3132 (6 << TX_LENGTHS_IPG_SHIFT) |
3133 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3134 else
3135 tw32(MAC_TX_LENGTHS,
3136 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3137 (6 << TX_LENGTHS_IPG_SHIFT) |
3138 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3140 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3141 if (netif_carrier_ok(tp->dev)) {
3142 tw32(HOSTCC_STAT_COAL_TICKS,
3143 tp->coal.stats_block_coalesce_usecs);
3144 } else {
3145 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3146 }
3147 }
3149 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3150 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3151 if (!netif_carrier_ok(tp->dev))
3152 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3153 tp->pwrmgmt_thresh;
3154 else
3155 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3156 tw32(PCIE_PWR_MGMT_THRESH, val);
3157 }
3159 return err;
3160 }
3162 /* This is called whenever we suspect that the system chipset is re-
3163 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3164 * is bogus tx completions. We try to recover by setting the
3165 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3166 */
3168 static void tg3_tx_recover(struct tg3 *tp)
3169 {
3170 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3171 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3173 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3174 "mapped I/O cycles to the network device, attempting to "
3175 "recover. Please report the problem to the driver maintainer "
3176 "and include system chipset information.\n", tp->dev->name);
3178 spin_lock(&tp->lock);
3179 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3180 spin_unlock(&tp->lock);
3181 }
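/* Number of free TX descriptors, computed from the producer/consumer
 * indices modulo the ring size; used when deciding whether to stop or
 * wake the transmit queue.
 */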
3183 static inline u32 tg3_tx_avail(struct tg3 *tp)
3184 {
3185 smp_mb();
3186 return (tp->tx_pending -
3187 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3188 }
3190 /* Tigon3 never reports partial packet sends. So we do not
3191 * need special logic to handle SKBs that have not had all
3192 * of their frags sent yet, like SunGEM does.
3193 */
3194 static void tg3_tx(struct tg3 *tp)
3195 {
3196 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3197 u32 sw_idx = tp->tx_cons;
3199 while (sw_idx != hw_idx) {
3200 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3201 struct sk_buff *skb = ri->skb;
3202 int i, tx_bug = 0;
3204 if (unlikely(skb == NULL)) {
3205 tg3_tx_recover(tp);
3206 return;
3207 }
3209 pci_unmap_single(tp->pdev,
3210 pci_unmap_addr(ri, mapping),
3211 skb_headlen(skb),
3212 PCI_DMA_TODEVICE);
3214 ri->skb = NULL;
3216 sw_idx = NEXT_TX(sw_idx);
3218 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3219 ri = &tp->tx_buffers[sw_idx];
3220 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))