/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "tg3.h"

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			112
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"July 11, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

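/* Illustrative sketch (not part of the driver): because the ring sizes
 * above are powers of two, wrapping an index with '& (size - 1)' is
 * equivalent to '% size' but compiles to a single AND instruction:
 *
 *	u32 idx = 511;
 *	idx = (idx + 1) & (TG3_TX_RING_SIZE - 1);	// idx == 0, one AND
 *	idx = (idx + 1) % TG3_TX_RING_SIZE;		// same result, but may
 *							// emit a divide if the
 *							// size isn't a constant
 *
 * NEXT_TX() above is exactly this mask form, which is why the sizes are
 * kept as compile-time constants instead of fields in struct tg3.
 */
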
#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

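/* Illustrative sketch (not part of the driver): the rx path is assumed to
 * compare the frame length against TG3_RX_COPY_THRESH(tp) and, for short
 * frames, copy into a freshly allocated skb instead of remapping the ring
 * buffer, roughly:
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp)) {
 *		// small packet: memcpy into a new skb, recycle the rx buffer
 *	} else {
 *		// large packet: unmap the buffer and hand it up as-is
 *	}
 *
 * On targets with efficient unaligned access the threshold is a compile
 * time constant, so the 'tp' dereference disappears entirely.
 */
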
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

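/* Illustrative sketch (not part of the driver): the indirect accessors
 * above tunnel MMIO through PCI config space, which is useful when the
 * BAR mapping cannot be used directly.  The pattern is always:
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(pdev, TG3PCI_REG_BASE_ADDR, reg_offset);
 *	pci_read_config_dword(pdev, TG3PCI_REG_DATA, &value);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 *
 * The lock is required because BASE_ADDR and DATA form a single shared
 * window: an interleaved writer would retarget the window mid-access.
 */
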
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

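/* Illustrative sketch (not part of the driver): posted PCI writes can sit
 * in bridge buffers indefinitely, so a write that must reach the device
 * before a delay starts is followed by a read of the same register:
 *
 *	writel(val, tp->regs + off);	// may be buffered along the way
 *	readl(tp->regs + off);		// the read forces the write through
 *	udelay(wait);			// now the delay is meaningful
 *
 * The tw32_f()/tw32_wait_f() macros below wrap exactly this idiom.
 */
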
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

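/* Illustrative sketch (not part of the driver): the APE lock functions
 * above implement a request/grant handshake with the management firmware,
 * and callers are assumed to pair them around shared-resource accesses
 * much like a mutex:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;			// firmware kept the lock; back off
 *	// ... touch the shared region ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * A failed acquisition must not be followed by an unlock, since writing
 * the grant register would release a lock the firmware still holds.
 */
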
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

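/* Illustrative sketch (not part of the driver): tg3_readphy() and
 * tg3_writephy() above drive a clause-22 MDIO cycle by composing a frame
 * in MAC_MI_COM and polling MI_COM_BUSY, conceptually:
 *
 *	frame  = (phy_addr << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK;
 *	frame |= (reg << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK;
 *	frame |= MI_COM_CMD_READ | MI_COM_START;	// or CMD_WRITE | data
 *	tw32_f(MAC_MI_COM, frame);
 *	while (tr32(MAC_MI_COM) & MI_COM_BUSY)
 *		udelay(10);			// bounded by PHY_BUSY_LOOPS
 *
 * Auto-polling is parked first because the MAC's own poller would
 * otherwise contend for the single MI interface.
 */
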
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

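/* Illustrative sketch (not part of the driver): the resolution above
 * follows the standard 802.3 pause negotiation table.  Given the PAUSE
 * and ASYM_PAUSE bits from both link partners, the outcome is:
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  =>  result
 *	     1           x            1             x           TX|RX
 *	     1           1            0             1           RX only
 *	     0           1            1             1           TX only
 *	     otherwise                                          none
 *
 * which is exactly what the nested conditionals compute.
 */
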
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

2065 static void tg3_frob_aux_power(struct tg3 *tp)
2067 struct tg3 *tp_peer = tp;
2069 /* The GPIOs do something completely different on 57765. */
2070 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2072 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2078 struct net_device *dev_peer;
2080 dev_peer = pci_get_drvdata(tp->pdev_peer);
2081 /* remove_one() may have been run on the peer. */
2085 tp_peer = netdev_priv(dev_peer);
2088 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2089 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2090 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2091 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2095 (GRC_LCLCTRL_GPIO_OE0 |
2096 GRC_LCLCTRL_GPIO_OE1 |
2097 GRC_LCLCTRL_GPIO_OE2 |
2098 GRC_LCLCTRL_GPIO_OUTPUT0 |
2099 GRC_LCLCTRL_GPIO_OUTPUT1),
2101 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2103 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2104 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2105 GRC_LCLCTRL_GPIO_OE1 |
2106 GRC_LCLCTRL_GPIO_OE2 |
2107 GRC_LCLCTRL_GPIO_OUTPUT0 |
2108 GRC_LCLCTRL_GPIO_OUTPUT1 |
2110 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2112 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2113 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2115 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2116 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2119 u32 grc_local_ctrl = 0;
2121 if (tp_peer != tp &&
2122 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2125 /* Workaround to prevent overdrawing Amps. */
2126 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2128 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2129 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2130 grc_local_ctrl, 100);
2133 /* On 5753 and variants, GPIO2 cannot be used. */
2134 no_gpio2 = tp->nic_sram_data_cfg &
2135 NIC_SRAM_DATA_CFG_NO_GPIO2;
2137 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2138 GRC_LCLCTRL_GPIO_OE1 |
2139 GRC_LCLCTRL_GPIO_OE2 |
2140 GRC_LCLCTRL_GPIO_OUTPUT1 |
2141 GRC_LCLCTRL_GPIO_OUTPUT2;
2143 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2144 GRC_LCLCTRL_GPIO_OUTPUT2);
2146 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2147 grc_local_ctrl, 100);
2149 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2151 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2152 grc_local_ctrl, 100);
			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
2161 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2162 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2163 if (tp_peer != tp &&
		    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
			return;
2167 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2168 (GRC_LCLCTRL_GPIO_OE1 |
2169 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2171 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2172 GRC_LCLCTRL_GPIO_OE1, 100);
2174 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2175 (GRC_LCLCTRL_GPIO_OE1 |
2176 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;
	return 0;
}
2194 static int tg3_setup_phy(struct tg3 *, int);
2196 #define RESET_KIND_SHUTDOWN 0
2197 #define RESET_KIND_INIT 1
2198 #define RESET_KIND_SUSPEND 2
2200 static void tg3_write_sig_post_reset(struct tg3 *, int);
2201 static int tg3_halt_cpu(struct tg3 *, u32);
2203 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2207 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2209 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2210 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2213 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2214 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2215 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2222 val = tr32(GRC_MISC_CFG);
2223 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2226 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2228 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2231 tg3_writephy(tp, MII_ADVERTISE, 0);
2232 tg3_writephy(tp, MII_BMCR,
2233 BMCR_ANENABLE | BMCR_ANRESTART);
2235 tg3_writephy(tp, MII_TG3_FET_TEST,
2236 phytest | MII_TG3_FET_SHADOW_EN);
2237 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
2243 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2246 } else if (do_low_power) {
2247 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2248 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2250 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2251 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2252 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2253 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2254 MII_TG3_AUXCTL_PCTL_VREG_11V);
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
2260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2262 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2263 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2266 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2267 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2268 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2269 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2270 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2271 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2274 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2277 /* tp->lock is held. */
2278 static int tg3_nvram_lock(struct tg3 *tp)
2280 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2283 if (tp->nvram_lock_cnt == 0) {
2284 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2285 for (i = 0; i < 8000; i++) {
2286 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2291 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2295 tp->nvram_lock_cnt++;
2300 /* tp->lock is held. */
2301 static void tg3_nvram_unlock(struct tg3 *tp)
2303 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2304 if (tp->nvram_lock_cnt > 0)
2305 tp->nvram_lock_cnt--;
2306 if (tp->nvram_lock_cnt == 0)
2307 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2311 /* tp->lock is held. */
2312 static void tg3_enable_nvram_access(struct tg3 *tp)
2314 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2315 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2316 u32 nvaccess = tr32(NVRAM_ACCESS);
2318 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2322 /* tp->lock is held. */
2323 static void tg3_disable_nvram_access(struct tg3 *tp)
2325 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2326 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2327 u32 nvaccess = tr32(NVRAM_ACCESS);
2329 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2333 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2334 u32 offset, u32 *val)
2339 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2342 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2343 EEPROM_ADDR_DEVID_MASK |
2345 tw32(GRC_EEPROM_ADDR,
2347 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2348 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2349 EEPROM_ADDR_ADDR_MASK) |
2350 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2352 for (i = 0; i < 1000; i++) {
2353 tmp = tr32(GRC_EEPROM_ADDR);
2355 if (tmp & EEPROM_ADDR_COMPLETE)
2359 if (!(tmp & EEPROM_ADDR_COMPLETE))
2362 tmp = tr32(GRC_EEPROM_DATA);
	/* The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);
2373 #define NVRAM_CMD_TIMEOUT 10000
2375 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2379 tw32(NVRAM_CMD, nvram_cmd);
2380 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2382 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2388 if (i == NVRAM_CMD_TIMEOUT)
2394 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2396 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2397 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2398 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2399 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2400 (tp->nvram_jedecnum == JEDEC_ATMEL))
2402 addr = ((addr / tp->nvram_pagesize) <<
2403 ATMEL_AT45DB0X1B_PAGE_POS) +
2404 (addr % tp->nvram_pagesize);
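	/* Worked example (assuming ATMEL_AT45DB0X1B_PAGE_POS is 9 and a
	 * 264-byte nvram_pagesize): logical address 528 falls in page 2 at
	 * offset 0, so the physical address becomes (2 << 9) + 0 = 0x400.
	 */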
2409 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2411 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2412 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2413 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2414 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2415 (tp->nvram_jedecnum == JEDEC_ATMEL))
2417 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2418 tp->nvram_pagesize) +
2419 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2424 /* NOTE: Data read in from NVRAM is byteswapped according to
2425 * the byteswapping settings for all other register accesses.
2426 * tg3 devices are BE devices, so on a BE machine, the data
2427 * returned will be exactly as it is seen in NVRAM. On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2430 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2434 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2435 return tg3_nvram_read_using_eeprom(tp, offset, val);
2437 offset = tg3_nvram_phys_addr(tp, offset);
2439 if (offset > NVRAM_ADDR_MSK)
2442 ret = tg3_nvram_lock(tp);
2446 tg3_enable_nvram_access(tp);
2448 tw32(NVRAM_ADDR, offset);
2449 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2450 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2453 *val = tr32(NVRAM_RDDATA);
2455 tg3_disable_nvram_access(tp);
2457 tg3_nvram_unlock(tp);
2462 /* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
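/* Illustrative sketch (not part of the original driver): because
 * tg3_nvram_read_be32() returns data in bytestream (big-endian) order,
 * a hypothetical helper can copy NVRAM contents straight into a byte
 * buffer with no further endianness fixups:
 */
static int tg3_nvram_read_block(struct tg3 *tp, u32 offset, u8 *buf, u32 len)
{
	u32 i;

	/* Assumes offset and len are 4-byte aligned, since NVRAM is always
	 * read one 32-bit word at a time.
	 */
	for (i = 0; i < len; i += 4) {
		__be32 v;
		int err = tg3_nvram_read_be32(tp, offset + i, &v);

		if (err)
			return err;
		memcpy(buf + i, &v, 4);
	}
	return 0;
}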
2472 /* tp->lock is held. */
2473 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2475 u32 addr_high, addr_low;
2478 addr_high = ((tp->dev->dev_addr[0] << 8) |
2479 tp->dev->dev_addr[1]);
2480 addr_low = ((tp->dev->dev_addr[2] << 24) |
2481 (tp->dev->dev_addr[3] << 16) |
2482 (tp->dev->dev_addr[4] << 8) |
2483 (tp->dev->dev_addr[5] << 0));
2484 for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
2487 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2488 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2493 for (i = 0; i < 12; i++) {
2494 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2495 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
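	/* Seed the transmit backoff algorithm from the sum of the MAC
	 * address bytes, so that devices sharing a collision domain are
	 * unlikely to pick identical retransmission slots.
	 */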
2499 addr_high = (tp->dev->dev_addr[0] +
2500 tp->dev->dev_addr[1] +
2501 tp->dev->dev_addr[2] +
2502 tp->dev->dev_addr[3] +
2503 tp->dev->dev_addr[4] +
2504 tp->dev->dev_addr[5]) &
2505 TX_BACKOFF_SEED_MASK;
2506 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2509 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2512 bool device_should_wake, do_low_power;
2514 /* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
2517 pci_write_config_dword(tp->pdev,
2518 TG3PCI_MISC_HOST_CTRL,
2519 tp->misc_host_ctrl);
2523 pci_enable_wake(tp->pdev, state, false);
2524 pci_set_power_state(tp->pdev, PCI_D0);
2526 /* Switch out of Vaux if it is a NIC */
2527 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2528 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2538 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2543 /* Restore the CLKREQ setting. */
2544 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2547 pci_read_config_word(tp->pdev,
2548 tp->pcie_cap + PCI_EXP_LNKCTL,
2550 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2551 pci_write_config_word(tp->pdev,
2552 tp->pcie_cap + PCI_EXP_LNKCTL,
2556 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2557 tw32(TG3PCI_MISC_HOST_CTRL,
2558 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2560 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2561 device_may_wakeup(&tp->pdev->dev) &&
2562 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2564 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2565 do_low_power = false;
2566 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2567 !tp->link_config.phy_is_low_power) {
2568 struct phy_device *phydev;
2569 u32 phyid, advertising;
2571 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2573 tp->link_config.phy_is_low_power = 1;
2575 tp->link_config.orig_speed = phydev->speed;
2576 tp->link_config.orig_duplex = phydev->duplex;
2577 tp->link_config.orig_autoneg = phydev->autoneg;
2578 tp->link_config.orig_advertising = phydev->advertising;
2580 advertising = ADVERTISED_TP |
2582 ADVERTISED_Autoneg |
2583 ADVERTISED_10baseT_Half;
2585 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2586 device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
2596 phydev->advertising = advertising;
2598 phy_start_aneg(phydev);
2600 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2601 if (phyid != PHY_ID_BCMAC131) {
2602 phyid &= PHY_BCM_OUI_MASK;
2603 if (phyid == PHY_BCM_OUI_1 ||
2604 phyid == PHY_BCM_OUI_2 ||
2605 phyid == PHY_BCM_OUI_3)
2606 do_low_power = true;
2610 do_low_power = true;
2612 if (tp->link_config.phy_is_low_power == 0) {
2613 tp->link_config.phy_is_low_power = 1;
2614 tp->link_config.orig_speed = tp->link_config.speed;
2615 tp->link_config.orig_duplex = tp->link_config.duplex;
2616 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2619 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2620 tp->link_config.speed = SPEED_10;
2621 tp->link_config.duplex = DUPLEX_HALF;
2622 tp->link_config.autoneg = AUTONEG_ENABLE;
2623 tg3_setup_phy(tp, 0);
2627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2630 val = tr32(GRC_VCPU_EXT_CTRL);
2631 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2632 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2636 for (i = 0; i < 200; i++) {
2637 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2638 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2643 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2644 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2645 WOL_DRV_STATE_SHUTDOWN |
2649 if (device_should_wake) {
2652 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2654 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2658 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2659 mac_mode = MAC_MODE_PORT_MODE_GMII;
2661 mac_mode = MAC_MODE_PORT_MODE_MII;
2663 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
2666 u32 speed = (tp->tg3_flags &
2667 TG3_FLAG_WOL_SPEED_100MB) ?
2668 SPEED_100 : SPEED_10;
2669 if (tg3_5700_link_polarity(tp, speed))
2670 mac_mode |= MAC_MODE_LINK_POLARITY;
2672 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2675 mac_mode = MAC_MODE_PORT_MODE_TBI;
2678 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2679 tw32(MAC_LED_CTRL, tp->led_ctrl);
2681 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2682 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2683 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2684 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2685 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2686 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2688 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2689 mac_mode |= tp->mac_mode &
2690 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2691 if (mac_mode & MAC_MODE_APE_TX_EN)
2692 mac_mode |= MAC_MODE_TDE_ENABLE;
2695 tw32_f(MAC_MODE, mac_mode);
2698 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2702 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2703 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2707 base_val = tp->pci_clock_ctrl;
2708 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2709 CLOCK_CTRL_TXCLK_DISABLE);
2711 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2712 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2713 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2714 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2715 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2717 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2718 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2719 u32 newbits1, newbits2;
2721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2723 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2724 CLOCK_CTRL_TXCLK_DISABLE |
2726 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2727 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2728 newbits1 = CLOCK_CTRL_625_CORE;
2729 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2731 newbits1 = CLOCK_CTRL_ALTCLK;
2732 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2735 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2738 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2741 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2746 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2747 CLOCK_CTRL_TXCLK_DISABLE |
2748 CLOCK_CTRL_44MHZ_CORE);
2750 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2753 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2754 tp->pci_clock_ctrl | newbits3, 40);
2758 if (!(device_should_wake) &&
2759 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2760 tg3_power_down_phy(tp, do_low_power);
2762 tg3_frob_aux_power(tp);
2764 /* Workaround for unstable PLL clock */
2765 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2766 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2767 u32 val = tr32(0x7d00);
2769 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2771 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2774 err = tg3_nvram_lock(tp);
2775 tg3_halt_cpu(tp, RX_CPU_BASE);
2777 tg3_nvram_unlock(tp);
2781 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2783 if (device_should_wake)
2784 pci_enable_wake(tp->pdev, state, true);
2786 /* Finally, set the new power state. */
2787 pci_set_power_state(tp->pdev, state);
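	/* Note the ordering above: the MAC/PHY quiesce, WOL setup and aux
	 * power handoff are all performed while the device is still in D0;
	 * the actual PCI power-state transition comes last.
	 */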
2792 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2794 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;
	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;
	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;
	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
2839 static void tg3_phy_copper_begin(struct tg3 *tp)
2844 if (tp->link_config.phy_is_low_power) {
2845 /* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
2848 tg3_writephy(tp, MII_TG3_CTRL, 0);
2850 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2851 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2852 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2853 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2855 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2856 } else if (tp->link_config.speed == SPEED_INVALID) {
2857 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2858 tp->link_config.advertising &=
2859 ~(ADVERTISED_1000baseT_Half |
2860 ADVERTISED_1000baseT_Full);
2862 new_adv = ADVERTISE_CSMA;
2863 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2864 new_adv |= ADVERTISE_10HALF;
2865 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2866 new_adv |= ADVERTISE_10FULL;
2867 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2868 new_adv |= ADVERTISE_100HALF;
2869 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2870 new_adv |= ADVERTISE_100FULL;
2872 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2874 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2876 if (tp->link_config.advertising &
2877 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2879 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2880 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2881 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2882 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2883 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2884 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2885 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2886 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2887 MII_TG3_CTRL_ENABLE_AS_MASTER);
2888 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2890 tg3_writephy(tp, MII_TG3_CTRL, 0);
2893 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2894 new_adv |= ADVERTISE_CSMA;
2896 /* Asking for a specific link mode. */
2897 if (tp->link_config.speed == SPEED_1000) {
2898 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2900 if (tp->link_config.duplex == DUPLEX_FULL)
2901 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2903 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2904 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2905 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2906 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2907 MII_TG3_CTRL_ENABLE_AS_MASTER);
2909 if (tp->link_config.speed == SPEED_100) {
2910 if (tp->link_config.duplex == DUPLEX_FULL)
2911 new_adv |= ADVERTISE_100FULL;
2913 new_adv |= ADVERTISE_100HALF;
2915 if (tp->link_config.duplex == DUPLEX_FULL)
2916 new_adv |= ADVERTISE_10FULL;
2918 new_adv |= ADVERTISE_10HALF;
2920 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2925 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2928 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2929 tp->link_config.speed != SPEED_INVALID) {
2930 u32 bmcr, orig_bmcr;
2932 tp->link_config.active_speed = tp->link_config.speed;
2933 tp->link_config.active_duplex = tp->link_config.duplex;
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;
		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;
		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}
2950 if (tp->link_config.duplex == DUPLEX_FULL)
2951 bmcr |= BMCR_FULLDPLX;
2953 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2954 (bmcr != orig_bmcr)) {
2955 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2956 for (i = 0; i < 1500; i++) {
2960 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2961 tg3_readphy(tp, MII_BMSR, &tmp))
2963 if (!(tmp & BMSR_LSTATUS)) {
2968 tg3_writephy(tp, MII_BMCR, bmcr);
2972 tg3_writephy(tp, MII_BMCR,
2973 BMCR_ANENABLE | BMCR_ANRESTART);
2977 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2981 /* Turn off tap power management. */
2982 /* Set Extended packet length bit */
2983 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
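	/* Each of the write pairs below programs one DSP coefficient: the
	 * target is written to MII_TG3_DSP_ADDRESS, then its value to
	 * MII_TG3_DSP_RW_PORT.
	 */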
2985 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2986 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2988 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2989 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2991 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2992 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2994 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2995 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2997 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2998 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
3005 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3007 u32 adv_reg, all_mask = 0;
3009 if (mask & ADVERTISED_10baseT_Half)
3010 all_mask |= ADVERTISE_10HALF;
3011 if (mask & ADVERTISED_10baseT_Full)
3012 all_mask |= ADVERTISE_10FULL;
3013 if (mask & ADVERTISED_100baseT_Half)
3014 all_mask |= ADVERTISE_100HALF;
3015 if (mask & ADVERTISED_100baseT_Full)
3016 all_mask |= ADVERTISE_100FULL;
3018 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3021 if ((adv_reg & all_mask) != all_mask)
3023 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
3027 if (mask & ADVERTISED_1000baseT_Half)
3028 all_mask |= ADVERTISE_1000HALF;
3029 if (mask & ADVERTISED_1000baseT_Full)
3030 all_mask |= ADVERTISE_1000FULL;
3032 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3035 if ((tg3_ctrl & all_mask) != all_mask)
3041 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3045 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3048 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3049 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3051 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3052 if (curadv != reqadv)
3055 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3056 tg3_readphy(tp, MII_LPA, rmtadv);
3058 /* Reprogram the advertisement register, even if it
3059 * does not affect the current link. If the link
3060 * gets renegotiated in the future, we can save an
3061 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
3064 if (curadv != reqadv) {
3065 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3066 ADVERTISE_PAUSE_ASYM);
3067 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3074 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3076 int current_link_up;
3078 u32 lcl_adv, rmt_adv;
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
3087 MAC_STATUS_CFG_CHANGED |
3088 MAC_STATUS_MI_COMPLETION |
3089 MAC_STATUS_LNKSTATE_CHANGED));
3092 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3094 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3098 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
3103 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3106 netif_carrier_ok(tp->dev)) {
3107 tg3_readphy(tp, MII_BMSR, &bmsr);
3108 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3109 !(bmsr & BMSR_LSTATUS))
3115 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3116 tg3_readphy(tp, MII_BMSR, &bmsr);
3117 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3118 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3121 if (!(bmsr & BMSR_LSTATUS)) {
3122 err = tg3_init_5401phy_dsp(tp);
3126 tg3_readphy(tp, MII_BMSR, &bmsr);
3127 for (i = 0; i < 1000; i++) {
3129 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3130 (bmsr & BMSR_LSTATUS)) {
3136 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3137 TG3_PHY_REV_BCM5401_B0 &&
3138 !(bmsr & BMSR_LSTATUS) &&
3139 tp->link_config.active_speed == SPEED_1000) {
3140 err = tg3_phy_reset(tp);
3142 err = tg3_init_5401phy_dsp(tp);
3147 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3148 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3149 /* 5701 {A0,B0} CRC bug workaround */
3150 tg3_writephy(tp, 0x15, 0x0a75);
3151 tg3_writephy(tp, 0x1c, 0x8c68);
3152 tg3_writephy(tp, 0x1c, 0x8d68);
3153 tg3_writephy(tp, 0x1c, 0x8c68);
3156 /* Clear pending interrupts... */
3157 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3158 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3160 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3161 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3162 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3163 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3167 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3168 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3169 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3171 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3174 current_link_up = 0;
3175 current_speed = SPEED_INVALID;
3176 current_duplex = DUPLEX_INVALID;
3178 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3181 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3182 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3183 if (!(val & (1 << 10))) {
3185 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3191 for (i = 0; i < 100; i++) {
3192 tg3_readphy(tp, MII_BMSR, &bmsr);
3193 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3194 (bmsr & BMSR_LSTATUS))
3199 if (bmsr & BMSR_LSTATUS) {
3202 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3203 for (i = 0; i < 2000; i++) {
3205 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3210 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3215 for (i = 0; i < 200; i++) {
3216 tg3_readphy(tp, MII_BMCR, &bmcr);
3217 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3219 if (bmcr && bmcr != 0x7fff)
3227 tp->link_config.active_speed = current_speed;
3228 tp->link_config.active_duplex = current_duplex;
3230 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3231 if ((bmcr & BMCR_ANENABLE) &&
3232 tg3_copper_is_advertising_all(tp,
3233 tp->link_config.advertising)) {
3234 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3236 current_link_up = 1;
3239 if (!(bmcr & BMCR_ANENABLE) &&
3240 tp->link_config.speed == current_speed &&
3241 tp->link_config.duplex == current_duplex &&
3242 tp->link_config.flowctrl ==
3243 tp->link_config.active_flowctrl) {
3244 current_link_up = 1;
3248 if (current_link_up == 1 &&
3249 tp->link_config.active_duplex == DUPLEX_FULL)
3250 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3254 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3257 tg3_phy_copper_begin(tp);
3259 tg3_readphy(tp, MII_BMSR, &tmp);
3260 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3261 (tmp & BMSR_LSTATUS))
3262 current_link_up = 1;
3265 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3266 if (current_link_up == 1) {
3267 if (tp->link_config.active_speed == SPEED_100 ||
3268 tp->link_config.active_speed == SPEED_10)
3269 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3271 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3272 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3273 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3275 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3277 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3278 if (tp->link_config.active_duplex == DUPLEX_HALF)
3279 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3282 if (current_link_up == 1 &&
3283 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3284 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3286 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3289 /* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
3292 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3293 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3294 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3295 tw32_f(MAC_MI_MODE, tp->mi_mode);
3299 tw32_f(MAC_MODE, tp->mac_mode);
3302 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3303 /* Polled via timer. */
3304 tw32_f(MAC_EVENT, 0);
3306 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3311 current_link_up == 1 &&
3312 tp->link_config.active_speed == SPEED_1000 &&
3313 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3314 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
3318 MAC_STATUS_CFG_CHANGED));
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
3322 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3325 /* Prevent send BD corruption. */
3326 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3327 u16 oldlnkctl, newlnkctl;
3329 pci_read_config_word(tp->pdev,
3330 tp->pcie_cap + PCI_EXP_LNKCTL,
3332 if (tp->link_config.active_speed == SPEED_100 ||
3333 tp->link_config.active_speed == SPEED_10)
3334 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3336 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3337 if (newlnkctl != oldlnkctl)
3338 pci_write_config_word(tp->pdev,
3339 tp->pcie_cap + PCI_EXP_LNKCTL,
3343 if (current_link_up != netif_carrier_ok(tp->dev)) {
3344 if (current_link_up)
3345 netif_carrier_on(tp->dev);
3347 netif_carrier_off(tp->dev);
3348 tg3_link_report(tp);
3354 struct tg3_fiber_aneginfo {
	int			state;
#define ANEG_STATE_UNKNOWN		0
3357 #define ANEG_STATE_AN_ENABLE 1
3358 #define ANEG_STATE_RESTART_INIT 2
3359 #define ANEG_STATE_RESTART 3
3360 #define ANEG_STATE_DISABLE_LINK_OK 4
3361 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3362 #define ANEG_STATE_ABILITY_DETECT 6
3363 #define ANEG_STATE_ACK_DETECT_INIT 7
3364 #define ANEG_STATE_ACK_DETECT 8
3365 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3366 #define ANEG_STATE_COMPLETE_ACK 10
3367 #define ANEG_STATE_IDLE_DETECT_INIT 11
3368 #define ANEG_STATE_IDLE_DETECT 12
3369 #define ANEG_STATE_LINK_OK 13
3370 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3371 #define ANEG_STATE_NEXT_PAGE_WAIT 15
	u32			flags;
#define MR_AN_ENABLE		0x00000001
3375 #define MR_RESTART_AN 0x00000002
3376 #define MR_AN_COMPLETE 0x00000004
3377 #define MR_PAGE_RX 0x00000008
3378 #define MR_NP_LOADED 0x00000010
3379 #define MR_TOGGLE_TX 0x00000020
3380 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3381 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3382 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3383 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3384 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3385 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3386 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3387 #define MR_TOGGLE_RX 0x00002000
3388 #define MR_NP_RX 0x00004000
3390 #define MR_LINK_OK 0x80000000
3392 unsigned long link_time, cur_time;
3394 u32 ability_match_cfg;
3395 int ability_match_count;
3397 char ability_match, idle_match, ack_match;
3399 u32 txconfig, rxconfig;
3400 #define ANEG_CFG_NP 0x00000080
3401 #define ANEG_CFG_ACK 0x00000040
3402 #define ANEG_CFG_RF2 0x00000020
3403 #define ANEG_CFG_RF1 0x00000010
3404 #define ANEG_CFG_PS2 0x00000001
3405 #define ANEG_CFG_PS1 0x00008000
3406 #define ANEG_CFG_HD 0x00004000
3407 #define ANEG_CFG_FD 0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
3414 #define ANEG_FAILED -1
3416 #define ANEG_STATE_SETTLE_TIME 10000
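/* Software implementation of the 1000BASE-X (IEEE 802.3 clause 37)
 * auto-negotiation arbitration state machine. Configuration words are
 * transmitted via MAC_TX_AUTO_NEG while MAC_MODE_SEND_CONFIGS is set,
 * and the partner's words are sampled from MAC_RX_AUTO_NEG; the
 * ability/ack/idle matching below mirrors the standard's arbitration
 * states.
 */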
3418 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3419 struct tg3_fiber_aneginfo *ap)
3422 unsigned long delta;
3426 if (ap->state == ANEG_STATE_UNKNOWN) {
3430 ap->ability_match_cfg = 0;
3431 ap->ability_match_count = 0;
3432 ap->ability_match = 0;
3438 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3439 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3441 if (rx_cfg_reg != ap->ability_match_cfg) {
3442 ap->ability_match_cfg = rx_cfg_reg;
3443 ap->ability_match = 0;
3444 ap->ability_match_count = 0;
3446 if (++ap->ability_match_count > 1) {
3447 ap->ability_match = 1;
3448 ap->ability_match_cfg = rx_cfg_reg;
3451 if (rx_cfg_reg & ANEG_CFG_ACK)
3459 ap->ability_match_cfg = 0;
3460 ap->ability_match_count = 0;
3461 ap->ability_match = 0;
3467 ap->rxconfig = rx_cfg_reg;
3470 switch (ap->state) {
3471 case ANEG_STATE_UNKNOWN:
3472 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3473 ap->state = ANEG_STATE_AN_ENABLE;
3476 case ANEG_STATE_AN_ENABLE:
3477 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3478 if (ap->flags & MR_AN_ENABLE) {
3481 ap->ability_match_cfg = 0;
3482 ap->ability_match_count = 0;
3483 ap->ability_match = 0;
3487 ap->state = ANEG_STATE_RESTART_INIT;
3489 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3493 case ANEG_STATE_RESTART_INIT:
3494 ap->link_time = ap->cur_time;
3495 ap->flags &= ~(MR_NP_LOADED);
3497 tw32(MAC_TX_AUTO_NEG, 0);
3498 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3499 tw32_f(MAC_MODE, tp->mac_mode);
3502 ret = ANEG_TIMER_ENAB;
3503 ap->state = ANEG_STATE_RESTART;
3506 case ANEG_STATE_RESTART:
3507 delta = ap->cur_time - ap->link_time;
3508 if (delta > ANEG_STATE_SETTLE_TIME)
3509 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3511 ret = ANEG_TIMER_ENAB;
3514 case ANEG_STATE_DISABLE_LINK_OK:
3518 case ANEG_STATE_ABILITY_DETECT_INIT:
3519 ap->flags &= ~(MR_TOGGLE_TX);
3520 ap->txconfig = ANEG_CFG_FD;
3521 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3522 if (flowctrl & ADVERTISE_1000XPAUSE)
3523 ap->txconfig |= ANEG_CFG_PS1;
3524 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3525 ap->txconfig |= ANEG_CFG_PS2;
3526 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3527 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3528 tw32_f(MAC_MODE, tp->mac_mode);
3531 ap->state = ANEG_STATE_ABILITY_DETECT;
3534 case ANEG_STATE_ABILITY_DETECT:
3535 if (ap->ability_match != 0 && ap->rxconfig != 0)
3536 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3539 case ANEG_STATE_ACK_DETECT_INIT:
3540 ap->txconfig |= ANEG_CFG_ACK;
3541 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3542 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3543 tw32_f(MAC_MODE, tp->mac_mode);
3546 ap->state = ANEG_STATE_ACK_DETECT;
3549 case ANEG_STATE_ACK_DETECT:
3550 if (ap->ack_match != 0) {
3551 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3552 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3553 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3555 ap->state = ANEG_STATE_AN_ENABLE;
3557 } else if (ap->ability_match != 0 &&
3558 ap->rxconfig == 0) {
3559 ap->state = ANEG_STATE_AN_ENABLE;
3563 case ANEG_STATE_COMPLETE_ACK_INIT:
3564 if (ap->rxconfig & ANEG_CFG_INVAL) {
3568 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3569 MR_LP_ADV_HALF_DUPLEX |
3570 MR_LP_ADV_SYM_PAUSE |
3571 MR_LP_ADV_ASYM_PAUSE |
3572 MR_LP_ADV_REMOTE_FAULT1 |
3573 MR_LP_ADV_REMOTE_FAULT2 |
3574 MR_LP_ADV_NEXT_PAGE |
3577 if (ap->rxconfig & ANEG_CFG_FD)
3578 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3579 if (ap->rxconfig & ANEG_CFG_HD)
3580 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3581 if (ap->rxconfig & ANEG_CFG_PS1)
3582 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3583 if (ap->rxconfig & ANEG_CFG_PS2)
3584 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3585 if (ap->rxconfig & ANEG_CFG_RF1)
3586 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3587 if (ap->rxconfig & ANEG_CFG_RF2)
3588 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3589 if (ap->rxconfig & ANEG_CFG_NP)
3590 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3592 ap->link_time = ap->cur_time;
3594 ap->flags ^= (MR_TOGGLE_TX);
3595 if (ap->rxconfig & 0x0008)
3596 ap->flags |= MR_TOGGLE_RX;
3597 if (ap->rxconfig & ANEG_CFG_NP)
3598 ap->flags |= MR_NP_RX;
3599 ap->flags |= MR_PAGE_RX;
3601 ap->state = ANEG_STATE_COMPLETE_ACK;
3602 ret = ANEG_TIMER_ENAB;
3605 case ANEG_STATE_COMPLETE_ACK:
3606 if (ap->ability_match != 0 &&
3607 ap->rxconfig == 0) {
3608 ap->state = ANEG_STATE_AN_ENABLE;
3611 delta = ap->cur_time - ap->link_time;
3612 if (delta > ANEG_STATE_SETTLE_TIME) {
3613 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3614 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3616 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3617 !(ap->flags & MR_NP_RX)) {
3618 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3626 case ANEG_STATE_IDLE_DETECT_INIT:
3627 ap->link_time = ap->cur_time;
3628 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3629 tw32_f(MAC_MODE, tp->mac_mode);
3632 ap->state = ANEG_STATE_IDLE_DETECT;
3633 ret = ANEG_TIMER_ENAB;
3636 case ANEG_STATE_IDLE_DETECT:
3637 if (ap->ability_match != 0 &&
3638 ap->rxconfig == 0) {
3639 ap->state = ANEG_STATE_AN_ENABLE;
3642 delta = ap->cur_time - ap->link_time;
3643 if (delta > ANEG_STATE_SETTLE_TIME) {
3644 /* XXX another gem from the Broadcom driver :( */
3645 ap->state = ANEG_STATE_LINK_OK;
3649 case ANEG_STATE_LINK_OK:
3650 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3654 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3655 /* ??? unimplemented */
3658 case ANEG_STATE_NEXT_PAGE_WAIT:
3659 /* ??? unimplemented */
3670 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3673 struct tg3_fiber_aneginfo aninfo;
3674 int status = ANEG_FAILED;
3678 tw32_f(MAC_TX_AUTO_NEG, 0);
3680 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3681 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3684 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3687 memset(&aninfo, 0, sizeof(aninfo));
3688 aninfo.flags |= MR_AN_ENABLE;
3689 aninfo.state = ANEG_STATE_UNKNOWN;
3690 aninfo.cur_time = 0;
3692 while (++tick < 195000) {
3693 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3694 if (status == ANEG_DONE || status == ANEG_FAILED)
3700 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3701 tw32_f(MAC_MODE, tp->mac_mode);
3704 *txflags = aninfo.txconfig;
3705 *rxflags = aninfo.flags;
3707 if (status == ANEG_DONE &&
3708 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3709 MR_LP_ADV_FULL_DUPLEX)))
3715 static void tg3_init_bcm8002(struct tg3 *tp)
3717 u32 mac_status = tr32(MAC_STATUS);
	/* Reset when initializing for the first time or when we have a link. */
3721 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3722 !(mac_status & MAC_STATUS_PCS_SYNCED))
3725 /* Set PLL lock range. */
3726 tg3_writephy(tp, 0x16, 0x8007);
3729 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3731 /* Wait for reset to complete. */
3732 /* XXX schedule_timeout() ... */
3733 for (i = 0; i < 500; i++)
3736 /* Config mode; select PMA/Ch 1 regs. */
3737 tg3_writephy(tp, 0x10, 0x8411);
3739 /* Enable auto-lock and comdet, select txclk for tx. */
3740 tg3_writephy(tp, 0x11, 0x0a10);
3742 tg3_writephy(tp, 0x18, 0x00a0);
3743 tg3_writephy(tp, 0x16, 0x41ff);
3745 /* Assert and deassert POR. */
3746 tg3_writephy(tp, 0x13, 0x0400);
3748 tg3_writephy(tp, 0x13, 0x0000);
3750 tg3_writephy(tp, 0x11, 0x0a50);
3752 tg3_writephy(tp, 0x11, 0x0a10);
3754 /* Wait for signal to stabilize */
3755 /* XXX schedule_timeout() ... */
3756 for (i = 0; i < 15000; i++)
	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
3762 tg3_writephy(tp, 0x10, 0x8011);
3765 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3768 u32 sg_dig_ctrl, sg_dig_status;
3769 u32 serdes_cfg, expected_sg_dig_ctrl;
3770 int workaround, port_a;
3771 int current_link_up;
3774 expected_sg_dig_ctrl = 0;
3777 current_link_up = 0;
3779 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3780 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3782 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3785 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3786 /* preserve bits 20-23 for voltage regulator */
3787 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3790 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3792 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3793 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3795 u32 val = serdes_cfg;
3801 tw32_f(MAC_SERDES_CFG, val);
3804 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3806 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3807 tg3_setup_flow_control(tp, 0, 0);
3808 current_link_up = 1;
3813 /* Want auto-negotiation. */
3814 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3816 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3817 if (flowctrl & ADVERTISE_1000XPAUSE)
3818 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3819 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3820 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3822 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3823 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3824 tp->serdes_counter &&
3825 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3826 MAC_STATUS_RCVD_CFG)) ==
3827 MAC_STATUS_PCS_SYNCED)) {
3828 tp->serdes_counter--;
3829 current_link_up = 1;
3834 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3835 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3837 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3839 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3840 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3841 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3842 MAC_STATUS_SIGNAL_DET)) {
3843 sg_dig_status = tr32(SG_DIG_STATUS);
3844 mac_status = tr32(MAC_STATUS);
3846 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3847 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3848 u32 local_adv = 0, remote_adv = 0;
3850 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3851 local_adv |= ADVERTISE_1000XPAUSE;
3852 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3853 local_adv |= ADVERTISE_1000XPSE_ASYM;
3855 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3856 remote_adv |= LPA_1000XPAUSE;
3857 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3858 remote_adv |= LPA_1000XPAUSE_ASYM;
3860 tg3_setup_flow_control(tp, local_adv, remote_adv);
3861 current_link_up = 1;
3862 tp->serdes_counter = 0;
3863 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3864 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3865 if (tp->serdes_counter)
3866 tp->serdes_counter--;
3869 u32 val = serdes_cfg;
3876 tw32_f(MAC_SERDES_CFG, val);
3879 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				/* Link parallel detection - link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
3885 mac_status = tr32(MAC_STATUS);
3886 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3887 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3888 tg3_setup_flow_control(tp, 0, 0);
3889 current_link_up = 1;
3891 TG3_FLG2_PARALLEL_DETECT;
3892 tp->serdes_counter =
3893 SERDES_PARALLEL_DET_TIMEOUT;
3895 goto restart_autoneg;
3899 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3900 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3904 return current_link_up;
3907 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3909 int current_link_up = 0;
3911 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3914 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3915 u32 txflags, rxflags;
3918 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3919 u32 local_adv = 0, remote_adv = 0;
3921 if (txflags & ANEG_CFG_PS1)
3922 local_adv |= ADVERTISE_1000XPAUSE;
3923 if (txflags & ANEG_CFG_PS2)
3924 local_adv |= ADVERTISE_1000XPSE_ASYM;
3926 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3927 remote_adv |= LPA_1000XPAUSE;
3928 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3929 remote_adv |= LPA_1000XPAUSE_ASYM;
3931 tg3_setup_flow_control(tp, local_adv, remote_adv);
3933 current_link_up = 1;
3935 for (i = 0; i < 30; i++) {
3938 (MAC_STATUS_SYNC_CHANGED |
3939 MAC_STATUS_CFG_CHANGED));
3941 if ((tr32(MAC_STATUS) &
3942 (MAC_STATUS_SYNC_CHANGED |
3943 MAC_STATUS_CFG_CHANGED)) == 0)
3947 mac_status = tr32(MAC_STATUS);
3948 if (current_link_up == 0 &&
3949 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3950 !(mac_status & MAC_STATUS_RCVD_CFG))
3951 current_link_up = 1;
3953 tg3_setup_flow_control(tp, 0, 0);
3955 /* Forcing 1000FD link up. */
3956 current_link_up = 1;
3958 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3961 tw32_f(MAC_MODE, tp->mac_mode);
3966 return current_link_up;
3969 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3972 u16 orig_active_speed;
3973 u8 orig_active_duplex;
3975 int current_link_up;
3978 orig_pause_cfg = tp->link_config.active_flowctrl;
3979 orig_active_speed = tp->link_config.active_speed;
3980 orig_active_duplex = tp->link_config.active_duplex;
3982 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3983 netif_carrier_ok(tp->dev) &&
3984 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3985 mac_status = tr32(MAC_STATUS);
3986 mac_status &= (MAC_STATUS_PCS_SYNCED |
3987 MAC_STATUS_SIGNAL_DET |
3988 MAC_STATUS_CFG_CHANGED |
3989 MAC_STATUS_RCVD_CFG);
3990 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3991 MAC_STATUS_SIGNAL_DET)) {
3992 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3993 MAC_STATUS_CFG_CHANGED));
3998 tw32_f(MAC_TX_AUTO_NEG, 0);
4000 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4001 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4002 tw32_f(MAC_MODE, tp->mac_mode);
4005 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4006 tg3_init_bcm8002(tp);
4008 /* Enable link change event even when serdes polling. */
4009 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4012 current_link_up = 0;
4013 mac_status = tr32(MAC_STATUS);
4015 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4016 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4018 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4020 tp->napi[0].hw_status->status =
4021 (SD_STATUS_UPDATED |
4022 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4024 for (i = 0; i < 100; i++) {
4025 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4026 MAC_STATUS_CFG_CHANGED));
4028 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4029 MAC_STATUS_CFG_CHANGED |
4030 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4034 mac_status = tr32(MAC_STATUS);
4035 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4036 current_link_up = 0;
4037 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4038 tp->serdes_counter == 0) {
4039 tw32_f(MAC_MODE, (tp->mac_mode |
4040 MAC_MODE_SEND_CONFIGS));
4042 tw32_f(MAC_MODE, tp->mac_mode);
4046 if (current_link_up == 1) {
4047 tp->link_config.active_speed = SPEED_1000;
4048 tp->link_config.active_duplex = DUPLEX_FULL;
4049 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4050 LED_CTRL_LNKLED_OVERRIDE |
4051 LED_CTRL_1000MBPS_ON));
4053 tp->link_config.active_speed = SPEED_INVALID;
4054 tp->link_config.active_duplex = DUPLEX_INVALID;
4055 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4056 LED_CTRL_LNKLED_OVERRIDE |
4057 LED_CTRL_TRAFFIC_OVERRIDE));
4060 if (current_link_up != netif_carrier_ok(tp->dev)) {
4061 if (current_link_up)
4062 netif_carrier_on(tp->dev);
4064 netif_carrier_off(tp->dev);
4065 tg3_link_report(tp);
4067 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4068 if (orig_pause_cfg != now_pause_cfg ||
4069 orig_active_speed != tp->link_config.active_speed ||
4070 orig_active_duplex != tp->link_config.active_duplex)
4071 tg3_link_report(tp);
4077 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4079 int current_link_up, err = 0;
4083 u32 local_adv, remote_adv;
4085 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4086 tw32_f(MAC_MODE, tp->mac_mode);
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
4093 MAC_STATUS_CFG_CHANGED |
4094 MAC_STATUS_MI_COMPLETION |
4095 MAC_STATUS_LNKSTATE_CHANGED));
4101 current_link_up = 0;
4102 current_speed = SPEED_INVALID;
4103 current_duplex = DUPLEX_INVALID;
4105 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4106 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4108 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4109 bmsr |= BMSR_LSTATUS;
4111 bmsr &= ~BMSR_LSTATUS;
4114 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4116 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4117 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4118 /* do nothing, just check for link up at the end */
4119 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4122 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4123 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4124 ADVERTISE_1000XPAUSE |
4125 ADVERTISE_1000XPSE_ASYM |
4128 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4130 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4131 new_adv |= ADVERTISE_1000XHALF;
4132 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4133 new_adv |= ADVERTISE_1000XFULL;
4135 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4136 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4137 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4138 tg3_writephy(tp, MII_BMCR, bmcr);
4140 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4141 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4142 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4149 bmcr &= ~BMCR_SPEED1000;
4150 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4152 if (tp->link_config.duplex == DUPLEX_FULL)
4153 new_bmcr |= BMCR_FULLDPLX;
4155 if (new_bmcr != bmcr) {
4156 /* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
4159 new_bmcr |= BMCR_SPEED1000;
4161 /* Force a linkdown */
4162 if (netif_carrier_ok(tp->dev)) {
4165 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4166 adv &= ~(ADVERTISE_1000XFULL |
4167 ADVERTISE_1000XHALF |
4169 tg3_writephy(tp, MII_ADVERTISE, adv);
4170 tg3_writephy(tp, MII_BMCR, bmcr |
4174 netif_carrier_off(tp->dev);
4176 tg3_writephy(tp, MII_BMCR, new_bmcr);
4178 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4179 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
4182 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4183 bmsr |= BMSR_LSTATUS;
4185 bmsr &= ~BMSR_LSTATUS;
4187 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4191 if (bmsr & BMSR_LSTATUS) {
4192 current_speed = SPEED_1000;
4193 current_link_up = 1;
4194 if (bmcr & BMCR_FULLDPLX)
4195 current_duplex = DUPLEX_FULL;
4197 current_duplex = DUPLEX_HALF;
4202 if (bmcr & BMCR_ANENABLE) {
4205 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4206 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4207 common = local_adv & remote_adv;
4208 if (common & (ADVERTISE_1000XHALF |
4209 ADVERTISE_1000XFULL)) {
4210 if (common & ADVERTISE_1000XFULL)
4211 current_duplex = DUPLEX_FULL;
4213 current_duplex = DUPLEX_HALF;
4214 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4215 /* Link is up via parallel detect */
4217 current_link_up = 0;
4222 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4223 tg3_setup_flow_control(tp, local_adv, remote_adv);
4225 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4226 if (tp->link_config.active_duplex == DUPLEX_HALF)
4227 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4229 tw32_f(MAC_MODE, tp->mac_mode);
4232 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4234 tp->link_config.active_speed = current_speed;
4235 tp->link_config.active_duplex = current_duplex;
4237 if (current_link_up != netif_carrier_ok(tp->dev)) {
4238 if (current_link_up)
4239 netif_carrier_on(tp->dev);
4241 netif_carrier_off(tp->dev);
4242 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4244 tg3_link_report(tp);
4249 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4251 if (tp->serdes_counter) {
4252 /* Give autoneg time to complete. */
4253 tp->serdes_counter--;
4257 if (!netif_carrier_ok(tp->dev) &&
4258 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4261 tg3_readphy(tp, MII_BMCR, &bmcr);
4262 if (bmcr & BMCR_ANENABLE) {
4265 /* Select shadow register 0x1f */
4266 tg3_writephy(tp, 0x1c, 0x7c00);
4267 tg3_readphy(tp, 0x1c, &phy1);
4269 /* Select expansion interrupt status register */
4270 tg3_writephy(tp, 0x17, 0x0f01);
4271 tg3_readphy(tp, 0x15, &phy2);
4272 tg3_readphy(tp, 0x15, &phy2);
4274 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4275 /* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */
4280 bmcr &= ~BMCR_ANENABLE;
4281 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4282 tg3_writephy(tp, MII_BMCR, bmcr);
4283 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4286 } else if (netif_carrier_ok(tp->dev) &&
4287 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4288 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4291 /* Select expansion interrupt status register */
4292 tg3_writephy(tp, 0x17, 0x0f01);
4293 tg3_readphy(tp, 0x15, &phy2);
4297 /* Config code words received, turn on autoneg. */
4298 tg3_readphy(tp, MII_BMCR, &bmcr);
4299 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4301 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4307 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4311 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4312 err = tg3_setup_fiber_phy(tp, force_reset);
4313 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4314 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4316 err = tg3_setup_copper_phy(tp, force_reset);
4318 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4321 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4322 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4324 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4329 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4330 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4331 tw32(GRC_MISC_CFG, val);
4334 if (tp->link_config.active_speed == SPEED_1000 &&
4335 tp->link_config.active_duplex == DUPLEX_HALF)
4336 tw32(MAC_TX_LENGTHS,
4337 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4338 (6 << TX_LENGTHS_IPG_SHIFT) |
4339 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4341 tw32(MAC_TX_LENGTHS,
4342 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4343 (6 << TX_LENGTHS_IPG_SHIFT) |
4344 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
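	/* (Illustrative note: half-duplex gigabit uses the extended slot
	 * time programmed above (0xff) to account for carrier extension;
	 * all other modes use the standard value of 32.)
	 */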
4346 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4347 if (netif_carrier_ok(tp->dev)) {
4348 tw32(HOSTCC_STAT_COAL_TICKS,
4349 tp->coal.stats_block_coalesce_usecs);
4351 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4355 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4356 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4357 if (!netif_carrier_ok(tp->dev))
4358 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4361 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4362 tw32(PCIE_PWR_MGMT_THRESH, val);
4368 /* This is called whenever we suspect that the system chipset is re-
4369 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4370 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * to see if the problem is gone.
 */
4374 static void tg3_tx_recover(struct tg3 *tp)
4376 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4377 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4379 netdev_warn(tp->dev,
4380 "The system may be re-ordering memory-mapped I/O "
4381 "cycles to the network device, attempting to recover. "
4382 "Please report the problem to the driver maintainer "
4383 "and include system chipset information.\n");
4385 spin_lock(&tp->lock);
4386 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4387 spin_unlock(&tp->lock);
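/* Number of free TX descriptors: the descriptors in flight are
 * (tx_prod - tx_cons) taken modulo the ring size via the AND mask.
 * E.g. with a 512-entry ring, tx_prod = 5 and tx_cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors in flight.
 */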
4390 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4393 return tnapi->tx_pending -
4394 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4397 /* Tigon3 never reports partial packet sends. So we do not
4398 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4401 static void tg3_tx(struct tg3_napi *tnapi)
4403 struct tg3 *tp = tnapi->tp;
4404 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4405 u32 sw_idx = tnapi->tx_cons;
4406 struct netdev_queue *txq;
4407 int index = tnapi - tp->napi;
4409 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4412 txq = netdev_get_tx_queue(tp->dev, index);
4414 while (sw_idx != hw_idx) {
4415 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4416 struct sk_buff *skb = ri->skb;
4419 if (unlikely(skb == NULL)) {
4424 pci_unmap_single(tp->pdev,
4425 dma_unmap_addr(ri, mapping),
4431 sw_idx = NEXT_TX(sw_idx);
4433 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4434 ri = &tnapi->tx_buffers[sw_idx];
4435 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4438 pci_unmap_page(tp->pdev,
4439 dma_unmap_addr(ri, mapping),
4440 skb_shinfo(skb)->frags[i].size,
4442 sw_idx = NEXT_TX(sw_idx);
4447 if (unlikely(tx_bug)) {
4453 tnapi->tx_cons = sw_idx;
4455 /* Need to make the tx_cons update visible to tg3_start_xmit()
4456 * before checking for netif_queue_stopped(). Without the
4457 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
4462 if (unlikely(netif_tx_queue_stopped(txq) &&
4463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4464 __netif_tx_lock(txq, smp_processor_id());
4465 if (netif_tx_queue_stopped(txq) &&
4466 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4467 netif_tx_wake_queue(txq);
4468 __netif_tx_unlock(txq);
4472 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4477 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4478 map_sz, PCI_DMA_FROMDEVICE);
4479 dev_kfree_skb_any(ri->skb);
4483 /* Returns size of skb allocated or < 0 on error.
4485 * We only need to fill in the address because the other members
4486 * of the RX descriptor are invariant, see tg3_init_rings.
 * Note the purposeful asymmetry of cpu vs. chip accesses. When
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address), whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * fields, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
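/* A compact sketch of the split-ring idea described above, assuming a
 * hypothetical demo_status type and helper names that are not part of
 * this driver.  The host only ever writes the buffer-post rings, the
 * chip only ever writes the status ring, and each side polls the
 * other's producer index:
 */
#if 0	/* illustrative sketch only, never compiled */
struct demo_status {
	u32 len;	/* filled in by the chip */
	u32 opaque;	/* echo of the host's posting-ring cookie */
};

static void demo_consume_status(struct demo_status *ring, u32 *sw_idx,
				u32 hw_idx, u32 mask)
{
	while (*sw_idx != hw_idx) {
		struct demo_status *st = &ring[*sw_idx];

		/* hand st->opaque back to the matching post ring here */
		*sw_idx = (*sw_idx + 1) & mask;
	}
}
#endif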
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		bool hw_vlan __maybe_unused = false;
		u16 vtag __maybe_unused = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}
		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
			vtag = desc->err_vlan & RXD_VLAN_MASK;
#if TG3_VLAN_TAG_USED
			if (tp->vlgrp)
				hw_vlan = true;
			else
#endif
			{
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
						    __skb_push(skb, VLAN_HLEN);

				memmove(ve, skb->data + VLAN_HLEN,
					ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
			}
		}

#if TG3_VLAN_TAG_USED
		if (hw_vlan)
			vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}
	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx %
					       TG3_RX_JUMBO_RING_SIZE;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;

		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}
		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}
		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}

	return err;
}
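/* Worked example of the cpycnt computation above (illustrative only):
 * with TG3_RX_RING_SIZE == 512, a source ring whose cons_idx == 500 and
 * prod_idx == 10 (i.e. wrapped) is drained in two passes: the first
 * copies 512 - 500 == 12 entries, the second copies the remaining 10.
 * Each pass is additionally clamped by min() so that the copy never
 * wraps the destination ring either.
 */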
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);
	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handlers are synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
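/* Typical usage of the two helpers above (a sketch assembled for
 * illustration, not copied from one specific call site): pass a
 * non-zero irq_sync only when the hardware is being reconfigured.
 */
#if 0	/* illustrative sketch only, never compiled */
	tg3_full_lock(tp, 1);	/* irq_sync != 0: also quiesces IRQs */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
#endif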
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
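/* Worked example of the status-tag handshake (illustrative only): the
 * chip stamps each status block update with a tag, say status_tag ==
 * 0x2f.  Once NAPI has drained the rings, tg3_poll_msix() echoes the
 * value back via tw32_mailbox(int_mbox, 0x2f << 24).  If the chip has
 * posted more work in the meantime it sees the stale tag and re-asserts
 * the interrupt; conversely, a re-interrupt whose status_tag still
 * equals last_irq_tag is recognized above as screaming shared-IRQ noise
 * and reported as unhandled.
 */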
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
static void tg3_dump_short_state(struct tg3 *tp)
{
	netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
		   tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
		   tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
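/* Worked example (illustrative only): base == 0xfffff000 with len ==
 * 0x2000 gives base + len + 8 == 0x100001008, which truncates to 0x1008
 * in 32 bits.  Since 0x1008 < base, the buffer straddles a 4GB boundary
 * and the caller must take the workaround path.
 */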
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_BIT_MASK(40));
	return 0;
#else
	return 0;
#endif
}
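/* Worked example (illustrative only): DMA_BIT_MASK(40) is 0xffffffffff.
 * A mapping of 0xfffffff000 with len == 0x2000 sums to 0x10000001000,
 * which exceeds the mask, so the buffer is unreachable by the 40-bit
 * DMA engine and must be bounced through a freshly copied skb.
 */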
static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
		} else {
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
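/* Worked example of the mss_and_is_end packing used above (illustrative
 * only): a caller encoding mss == 1448 for the final descriptor passes
 * (1448 << 1) | 1 == 2897.  The function then recovers is_end == 1 and
 * mss == 1448, sets TXD_FLAG_END, and folds the MSS into the vlan_tag
 * word via TXD_MSS_SHIFT.
 */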
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;
	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}
	entry = tnapi->tx_prod;
	base_flags = 0;
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 hdrlen;
		int tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}

		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif
	len = skb_headlen(skb);

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > ETH_DATA_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);

			tg3_set_txd(tnapi, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}
	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;
dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       dma_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
					  struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock