/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.106"
#define DRV_MODULE_RELDATE	"January 12, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
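/* For example, because TG3_TX_RING_SIZE below is a compile-time
 * power of two, NEXT_TX() can advance a producer index with a
 * simple AND against (TG3_TX_RING_SIZE - 1) instead of a hardware
 * '%' operation.
 */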
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}
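/* Indirect register accessors: on some chips the register window is
 * reached through PCI config space (TG3PCI_REG_BASE_ADDR plus
 * TG3PCI_REG_DATA) rather than memory-mapped I/O; indirect_lock
 * serializes the two-step config-space sequence.
 */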
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
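/* Write a register and immediately read it back so the posted PCI
 * write is flushed before the caller proceeds.
 */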
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
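/* Mailbox writes in indirect mode: a few well-known mailboxes have
 * dedicated config-space aliases; everything else goes through the
 * 0x5600-based indirect window.
 */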
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
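/* Shorthand accessors used throughout the driver; the _f variants
 * flush the posted write, and tw32_wait_f() additionally honors the
 * usec_wait delay described above _tw32_flush().
 */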
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}
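/* Acquire one of the APE hardware semaphores on behalf of the driver;
 * driver and APE firmware arbitrate through the REQ/GRANT registers
 * polled below.
 */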
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
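/* Mask chip interrupts: set MASK_PCI_INT in misc host control and
 * write 1 to every vector's interrupt mailbox so no new interrupt
 * messages are generated.
 */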
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;
	u32 coal_now = 0;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | coal_now);
}
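/* Returns nonzero when the status block indicates outstanding work:
 * a link-change event (unless link changes are tracked through other
 * means) or ring indices the driver has not yet caught up with.
 */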
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
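/* Select the core clock configuration; this is skipped entirely on
 * CPMU-equipped and 5780-class chips, which manage clocking
 * differently.
 */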
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
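/* Clause 22 MDIO access to the PHY goes through the MAC's MI_COM
 * register: build a frame, start it, then poll MI_COM_BUSY for up
 * to PHY_BUSY_LOOPS iterations.
 */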
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
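/* Resolve the negotiated flow control configuration from the local
 * and link-partner 1000BASE-X advertisements, following the pause
 * resolution rules of IEEE 802.3 Annex 28B.
 */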
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
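/* Quiesce the PHY for a power-down transition.  Serdes parts only
 * freeze their autoneg state and return; 5906 enters EPHY IDDQ,
 * FET-style PHYs use the shadow-register standby sequence, and
 * everything else may end in BMCR_PDOWN unless the chip is on the
 * do-not-power-down list below.
 */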
2167 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2171 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2173 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2174 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2177 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2178 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2179 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2186 val = tr32(GRC_MISC_CFG);
2187 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2190 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2191 u32 phytest;
2192 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2193 u32 phy;
2195 tg3_writephy(tp, MII_ADVERTISE, 0);
2196 tg3_writephy(tp, MII_BMCR,
2197 BMCR_ANENABLE | BMCR_ANRESTART);
2199 tg3_writephy(tp, MII_TG3_FET_TEST,
2200 phytest | MII_TG3_FET_SHADOW_EN);
2201 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2202 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2203 tg3_writephy(tp,
2204 MII_TG3_FET_SHDW_AUXMODE4,
2205 phy);
2206 }
2207 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2208 }
2209 return;
2210 } else if (do_low_power) {
2211 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2212 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2214 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2215 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2216 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2217 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2218 MII_TG3_AUXCTL_PCTL_VREG_11V);
2219 }
2221 /* The PHY should not be powered down on some chips because
2222 * of bugs.
2223 */
2224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2226 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2227 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2228 return;
2230 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2231 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2232 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2233 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2234 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2235 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2236 }
2238 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2239 }
2241 /* tp->lock is held. */
2242 static int tg3_nvram_lock(struct tg3 *tp)
2243 {
2244 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2245 int i;
2247 if (tp->nvram_lock_cnt == 0) {
2248 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2249 for (i = 0; i < 8000; i++) {
2250 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2251 break;
2252 udelay(20);
2253 }
2254 if (i == 8000) {
2255 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2256 return -ENODEV;
2257 }
2258 }
2259 tp->nvram_lock_cnt++;
2260 }
2261 return 0;
2262 }
2264 /* tp->lock is held. */
2265 static void tg3_nvram_unlock(struct tg3 *tp)
2266 {
2267 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2268 if (tp->nvram_lock_cnt > 0)
2269 tp->nvram_lock_cnt--;
2270 if (tp->nvram_lock_cnt == 0)
2271 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2272 }
2273 }
2275 /* tp->lock is held. */
2276 static void tg3_enable_nvram_access(struct tg3 *tp)
2277 {
2278 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2279 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2280 u32 nvaccess = tr32(NVRAM_ACCESS);
2282 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2283 }
2284 }
2286 /* tp->lock is held. */
2287 static void tg3_disable_nvram_access(struct tg3 *tp)
2288 {
2289 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2290 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2291 u32 nvaccess = tr32(NVRAM_ACCESS);
2293 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2294 }
2295 }
2297 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2298 u32 offset, u32 *val)
2299 {
2300 u32 tmp;
2301 int i;
2303 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2304 return -EINVAL;
2306 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2307 EEPROM_ADDR_DEVID_MASK |
2308 EEPROM_ADDR_READ);
2309 tw32(GRC_EEPROM_ADDR,
2310 tmp |
2311 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2312 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2313 EEPROM_ADDR_ADDR_MASK) |
2314 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2316 for (i = 0; i < 1000; i++) {
2317 tmp = tr32(GRC_EEPROM_ADDR);
2319 if (tmp & EEPROM_ADDR_COMPLETE)
2320 break;
2321 msleep(1);
2322 }
2323 if (!(tmp & EEPROM_ADDR_COMPLETE))
2324 return -EBUSY;
2326 tmp = tr32(GRC_EEPROM_DATA);
2328 /*
2329 * The data will always be opposite the native endian
2330 * format. Perform a blind byteswap to compensate.
2331 */
2332 *val = swab32(tmp);
2334 return 0;
2335 }
2337 #define NVRAM_CMD_TIMEOUT 10000
2339 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2340 {
2341 int i;
2343 tw32(NVRAM_CMD, nvram_cmd);
2344 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2345 udelay(10);
2346 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2347 udelay(10);
2348 break;
2349 }
2350 }
2352 if (i == NVRAM_CMD_TIMEOUT)
2353 return -EBUSY;
2355 return 0;
2356 }
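/* Editorial note: with NVRAM_CMD_TIMEOUT at 10000 iterations and a 10 us
 * delay per pass, the polling loop above gives the flash controller
 * roughly 10000 * 10 us = 100 ms to raise NVRAM_CMD_DONE before the
 * function gives up with -EBUSY.
 */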
2358 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2359 {
2360 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2361 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2362 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2363 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2364 (tp->nvram_jedecnum == JEDEC_ATMEL))
2366 addr = ((addr / tp->nvram_pagesize) <<
2367 ATMEL_AT45DB0X1B_PAGE_POS) +
2368 (addr % tp->nvram_pagesize);
2370 return addr;
2371 }
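/* Worked example (editorial, assuming a 264-byte-page Atmel part, i.e.
 * tp->nvram_pagesize == 264 and ATMEL_AT45DB0X1B_PAGE_POS == 9): a
 * linear offset of 1000 falls in page 1000 / 264 = 3 at byte
 * 1000 % 264 = 208, so the physical address becomes (3 << 9) + 208 =
 * 1744.  tg3_nvram_logical_addr() below performs the exact inverse
 * mapping.
 */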
2373 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2374 {
2375 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2376 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2377 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2378 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2379 (tp->nvram_jedecnum == JEDEC_ATMEL))
2381 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2382 tp->nvram_pagesize) +
2383 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2385 return addr;
2386 }
2388 /* NOTE: Data read in from NVRAM is byteswapped according to
2389 * the byteswapping settings for all other register accesses.
2390 * tg3 devices are BE devices, so on a BE machine, the data
2391 * returned will be exactly as it is seen in NVRAM. On a LE
2392 * machine, the 32-bit value will be byteswapped.
2393 */
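/* Editorial example: if the bytes 0x12 0x34 0x56 0x78 are stored in
 * NVRAM, a big-endian host reads *val == 0x12345678 while a
 * little-endian host reads 0x78563412; tg3_nvram_read_be32() below hides
 * that difference by always returning big-endian (bytestream) data.
 */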
2394 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2395 {
2396 int ret;
2398 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2399 return tg3_nvram_read_using_eeprom(tp, offset, val);
2401 offset = tg3_nvram_phys_addr(tp, offset);
2403 if (offset > NVRAM_ADDR_MSK)
2404 return -EINVAL;
2406 ret = tg3_nvram_lock(tp);
2407 if (ret)
2408 return ret;
2410 tg3_enable_nvram_access(tp);
2412 tw32(NVRAM_ADDR, offset);
2413 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2414 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2416 if (ret == 0)
2417 *val = tr32(NVRAM_RDDATA);
2419 tg3_disable_nvram_access(tp);
2421 tg3_nvram_unlock(tp);
2423 return ret;
2424 }
2426 /* Ensures NVRAM data is in bytestream format. */
2427 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2428 {
2429 u32 v;
2430 int res = tg3_nvram_read(tp, offset, &v);
2431 if (!res)
2432 *val = cpu_to_be32(v);
2433 return res;
2434 }
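/* Usage sketch (editorial; mac_offset is a hypothetical NVRAM offset):
 * because tg3_nvram_read_be32() returns bytestream data, a caller can
 * copy the result straight into a byte array, e.g. when assembling a
 * MAC address from NVRAM:
 *
 *	__be32 hi, lo;
 *	if (!tg3_nvram_read_be32(tp, mac_offset, &hi) &&
 *	    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo))
 *		memcpy(&dev->dev_addr[2], &lo, 4);	// illustrative only
 */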
2436 /* tp->lock is held. */
2437 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2438 {
2439 u32 addr_high, addr_low;
2440 int i;
2442 addr_high = ((tp->dev->dev_addr[0] << 8) |
2443 tp->dev->dev_addr[1]);
2444 addr_low = ((tp->dev->dev_addr[2] << 24) |
2445 (tp->dev->dev_addr[3] << 16) |
2446 (tp->dev->dev_addr[4] << 8) |
2447 (tp->dev->dev_addr[5] << 0));
2448 for (i = 0; i < 4; i++) {
2449 if (i == 1 && skip_mac_1)
2450 continue;
2451 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2452 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2453 }
2455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2457 for (i = 0; i < 12; i++) {
2458 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2459 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2460 }
2461 }
2463 addr_high = (tp->dev->dev_addr[0] +
2464 tp->dev->dev_addr[1] +
2465 tp->dev->dev_addr[2] +
2466 tp->dev->dev_addr[3] +
2467 tp->dev->dev_addr[4] +
2468 tp->dev->dev_addr[5]) &
2469 TX_BACKOFF_SEED_MASK;
2470 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2471 }
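/* Worked example (editorial): for the address 00:10:18:aa:bb:cc the code
 * above writes addr_high = 0x00000010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5), replicated across the four
 * MAC_ADDR_* register pairs unless skip_mac_1 suppresses slot 1.
 */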
2473 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2474 {
2475 u32 misc_host_ctrl;
2476 bool device_should_wake, do_low_power;
2478 /* Make sure register accesses (indirect or otherwise)
2479 * will function correctly.
2480 */
2481 pci_write_config_dword(tp->pdev,
2482 TG3PCI_MISC_HOST_CTRL,
2483 tp->misc_host_ctrl);
2485 switch (state) {
2486 case PCI_D0:
2487 pci_enable_wake(tp->pdev, state, false);
2488 pci_set_power_state(tp->pdev, PCI_D0);
2490 /* Switch out of Vaux if it is a NIC */
2491 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2492 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2494 return 0;
2496 case PCI_D1:
2497 case PCI_D2:
2498 case PCI_D3hot:
2499 break;
2501 default:
2502 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2503 tp->dev->name, state);
2504 return -EINVAL;
2505 }
2507 /* Restore the CLKREQ setting. */
2508 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2509 u16 lnkctl;
2511 pci_read_config_word(tp->pdev,
2512 tp->pcie_cap + PCI_EXP_LNKCTL,
2513 &lnkctl);
2514 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2515 pci_write_config_word(tp->pdev,
2516 tp->pcie_cap + PCI_EXP_LNKCTL,
2517 lnkctl);
2518 }
2520 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2521 tw32(TG3PCI_MISC_HOST_CTRL,
2522 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2524 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2525 device_may_wakeup(&tp->pdev->dev) &&
2526 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2528 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2529 do_low_power = false;
2530 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2531 !tp->link_config.phy_is_low_power) {
2532 struct phy_device *phydev;
2533 u32 phyid, advertising;
2535 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2537 tp->link_config.phy_is_low_power = 1;
2539 tp->link_config.orig_speed = phydev->speed;
2540 tp->link_config.orig_duplex = phydev->duplex;
2541 tp->link_config.orig_autoneg = phydev->autoneg;
2542 tp->link_config.orig_advertising = phydev->advertising;
2544 advertising = ADVERTISED_TP |
2545 ADVERTISED_Pause |
2546 ADVERTISED_Autoneg |
2547 ADVERTISED_10baseT_Half;
2549 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2550 device_should_wake) {
2551 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2552 advertising |=
2553 ADVERTISED_100baseT_Half |
2554 ADVERTISED_100baseT_Full |
2555 ADVERTISED_10baseT_Full;
2556 else
2557 advertising |= ADVERTISED_10baseT_Full;
2560 phydev->advertising = advertising;
2562 phy_start_aneg(phydev);
2564 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2565 if (phyid != TG3_PHY_ID_BCMAC131) {
2566 phyid &= TG3_PHY_OUI_MASK;
2567 if (phyid == TG3_PHY_OUI_1 ||
2568 phyid == TG3_PHY_OUI_2 ||
2569 phyid == TG3_PHY_OUI_3)
2570 do_low_power = true;
2571 }
2572 }
2573 } else {
2574 do_low_power = true;
2576 if (tp->link_config.phy_is_low_power == 0) {
2577 tp->link_config.phy_is_low_power = 1;
2578 tp->link_config.orig_speed = tp->link_config.speed;
2579 tp->link_config.orig_duplex = tp->link_config.duplex;
2580 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2581 }
2583 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2584 tp->link_config.speed = SPEED_10;
2585 tp->link_config.duplex = DUPLEX_HALF;
2586 tp->link_config.autoneg = AUTONEG_ENABLE;
2587 tg3_setup_phy(tp, 0);
2588 }
2589 }
2591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2592 u32 val;
2594 val = tr32(GRC_VCPU_EXT_CTRL);
2595 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2596 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2597 int i;
2598 u32 val;
2600 for (i = 0; i < 200; i++) {
2601 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2602 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2603 break;
2604 msleep(1);
2605 }
2606 }
2607 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2608 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2609 WOL_DRV_STATE_SHUTDOWN |
2610 WOL_DRV_WOL |
2611 WOL_SET_MAGIC_PKT);
2613 if (device_should_wake) {
2614 u32 mac_mode;
2616 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2617 if (do_low_power) {
2618 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2619 udelay(40);
2620 }
2622 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2623 mac_mode = MAC_MODE_PORT_MODE_GMII;
2624 else
2625 mac_mode = MAC_MODE_PORT_MODE_MII;
2627 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2628 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2629 ASIC_REV_5700) {
2630 u32 speed = (tp->tg3_flags &
2631 TG3_FLAG_WOL_SPEED_100MB) ?
2632 SPEED_100 : SPEED_10;
2633 if (tg3_5700_link_polarity(tp, speed))
2634 mac_mode |= MAC_MODE_LINK_POLARITY;
2635 else
2636 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2637 }
2638 } else {
2639 mac_mode = MAC_MODE_PORT_MODE_TBI;
2640 }
2642 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2643 tw32(MAC_LED_CTRL, tp->led_ctrl);
2645 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2646 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2647 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2648 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2649 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2650 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2652 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2653 mac_mode |= tp->mac_mode &
2654 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2655 if (mac_mode & MAC_MODE_APE_TX_EN)
2656 mac_mode |= MAC_MODE_TDE_ENABLE;
2657 }
2659 tw32_f(MAC_MODE, mac_mode);
2660 udelay(100);
2662 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2663 udelay(10);
2664 }
2666 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2667 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2671 base_val = tp->pci_clock_ctrl;
2672 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2673 CLOCK_CTRL_TXCLK_DISABLE);
2675 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2676 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2677 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2678 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2679 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2680 /* do nothing */
2681 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2682 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2683 u32 newbits1, newbits2;
2685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2687 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2688 CLOCK_CTRL_TXCLK_DISABLE |
2689 CLOCK_CTRL_ALTCLK);
2690 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2691 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2692 newbits1 = CLOCK_CTRL_625_CORE;
2693 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2694 } else {
2695 newbits1 = CLOCK_CTRL_ALTCLK;
2696 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2697 }
2699 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2700 40);
2702 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2703 40);
2705 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2706 u32 newbits3;
2708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2709 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2710 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2711 CLOCK_CTRL_TXCLK_DISABLE |
2712 CLOCK_CTRL_44MHZ_CORE);
2713 } else {
2714 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2715 }
2717 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2718 tp->pci_clock_ctrl | newbits3, 40);
2719 }
2720 }
2722 if (!(device_should_wake) &&
2723 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2724 tg3_power_down_phy(tp, do_low_power);
2726 tg3_frob_aux_power(tp);
2728 /* Workaround for unstable PLL clock */
2729 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2730 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2731 u32 val = tr32(0x7d00);
2733 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2734 tw32(0x7d00, val);
2735 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2736 int err;
2738 err = tg3_nvram_lock(tp);
2739 tg3_halt_cpu(tp, RX_CPU_BASE);
2740 if (!err)
2741 tg3_nvram_unlock(tp);
2742 }
2743 }
2745 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2747 if (device_should_wake)
2748 pci_enable_wake(tp->pdev, state, true);
2750 /* Finally, set the new power state. */
2751 pci_set_power_state(tp->pdev, state);
2753 return 0;
2754 }
2756 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2757 {
2758 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2759 case MII_TG3_AUX_STAT_10HALF:
2760 *speed = SPEED_10;
2761 *duplex = DUPLEX_HALF;
2762 break;
2764 case MII_TG3_AUX_STAT_10FULL:
2765 *speed = SPEED_10;
2766 *duplex = DUPLEX_FULL;
2767 break;
2769 case MII_TG3_AUX_STAT_100HALF:
2770 *speed = SPEED_100;
2771 *duplex = DUPLEX_HALF;
2772 break;
2774 case MII_TG3_AUX_STAT_100FULL:
2775 *speed = SPEED_100;
2776 *duplex = DUPLEX_FULL;
2777 break;
2779 case MII_TG3_AUX_STAT_1000HALF:
2780 *speed = SPEED_1000;
2781 *duplex = DUPLEX_HALF;
2782 break;
2784 case MII_TG3_AUX_STAT_1000FULL:
2785 *speed = SPEED_1000;
2786 *duplex = DUPLEX_FULL;
2787 break;
2789 default:
2790 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2791 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2792 SPEED_10;
2793 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2794 DUPLEX_HALF;
2795 break;
2796 }
2797 *speed = SPEED_INVALID;
2798 *duplex = DUPLEX_INVALID;
2799 break;
2800 }
2801 }
2803 static void tg3_phy_copper_begin(struct tg3 *tp)
2804 {
2805 u32 new_adv;
2806 int i;
2808 if (tp->link_config.phy_is_low_power) {
2809 /* Entering low power mode. Disable gigabit and
2810 * 100baseT advertisements.
2811 */
2812 tg3_writephy(tp, MII_TG3_CTRL, 0);
2814 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2815 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2816 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2817 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2819 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2820 } else if (tp->link_config.speed == SPEED_INVALID) {
2821 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2822 tp->link_config.advertising &=
2823 ~(ADVERTISED_1000baseT_Half |
2824 ADVERTISED_1000baseT_Full);
2826 new_adv = ADVERTISE_CSMA;
2827 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2828 new_adv |= ADVERTISE_10HALF;
2829 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2830 new_adv |= ADVERTISE_10FULL;
2831 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2832 new_adv |= ADVERTISE_100HALF;
2833 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2834 new_adv |= ADVERTISE_100FULL;
2836 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2838 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2840 if (tp->link_config.advertising &
2841 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2842 new_adv = 0;
2843 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2844 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2845 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2846 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2847 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2848 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2849 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2850 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2851 MII_TG3_CTRL_ENABLE_AS_MASTER);
2852 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2853 } else {
2854 tg3_writephy(tp, MII_TG3_CTRL, 0);
2855 }
2856 } else {
2857 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2858 new_adv |= ADVERTISE_CSMA;
2860 /* Asking for a specific link mode. */
2861 if (tp->link_config.speed == SPEED_1000) {
2862 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2864 if (tp->link_config.duplex == DUPLEX_FULL)
2865 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2866 else
2867 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2868 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2869 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2870 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2871 MII_TG3_CTRL_ENABLE_AS_MASTER);
2872 } else {
2873 if (tp->link_config.speed == SPEED_100) {
2874 if (tp->link_config.duplex == DUPLEX_FULL)
2875 new_adv |= ADVERTISE_100FULL;
2876 else
2877 new_adv |= ADVERTISE_100HALF;
2878 } else {
2879 if (tp->link_config.duplex == DUPLEX_FULL)
2880 new_adv |= ADVERTISE_10FULL;
2881 else
2882 new_adv |= ADVERTISE_10HALF;
2883 }
2884 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2886 new_adv = 0;
2887 }
2889 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2890 }
2892 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2893 tp->link_config.speed != SPEED_INVALID) {
2894 u32 bmcr, orig_bmcr;
2896 tp->link_config.active_speed = tp->link_config.speed;
2897 tp->link_config.active_duplex = tp->link_config.duplex;
2899 bmcr = 0;
2900 switch (tp->link_config.speed) {
2901 default:
2902 case SPEED_10:
2903 break;
2905 case SPEED_100:
2906 bmcr |= BMCR_SPEED100;
2907 break;
2909 case SPEED_1000:
2910 bmcr |= TG3_BMCR_SPEED1000;
2911 break;
2912 }
2914 if (tp->link_config.duplex == DUPLEX_FULL)
2915 bmcr |= BMCR_FULLDPLX;
2917 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2918 (bmcr != orig_bmcr)) {
2919 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2920 for (i = 0; i < 1500; i++) {
2921 u32 tmp;
2923 udelay(10);
2924 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2925 tg3_readphy(tp, MII_BMSR, &tmp))
2926 continue;
2927 if (!(tmp & BMSR_LSTATUS)) {
2928 udelay(40);
2929 break;
2930 }
2931 }
2932 tg3_writephy(tp, MII_BMCR, bmcr);
2933 udelay(40);
2934 }
2935 } else {
2936 tg3_writephy(tp, MII_BMCR,
2937 BMCR_ANENABLE | BMCR_ANRESTART);
2938 }
2939 }
2941 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2942 {
2943 int err;
2945 /* Turn off tap power management. */
2946 /* Set Extended packet length bit */
2947 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2949 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2950 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2952 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2953 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2955 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2956 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2958 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2959 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2961 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2962 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2964 udelay(40);
2966 return err;
2967 }
2969 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2970 {
2971 u32 adv_reg, all_mask = 0;
2973 if (mask & ADVERTISED_10baseT_Half)
2974 all_mask |= ADVERTISE_10HALF;
2975 if (mask & ADVERTISED_10baseT_Full)
2976 all_mask |= ADVERTISE_10FULL;
2977 if (mask & ADVERTISED_100baseT_Half)
2978 all_mask |= ADVERTISE_100HALF;
2979 if (mask & ADVERTISED_100baseT_Full)
2980 all_mask |= ADVERTISE_100FULL;
2982 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2983 return 0;
2985 if ((adv_reg & all_mask) != all_mask)
2986 return 0;
2987 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2988 u32 tg3_ctrl;
2990 all_mask = 0;
2991 if (mask & ADVERTISED_1000baseT_Half)
2992 all_mask |= ADVERTISE_1000HALF;
2993 if (mask & ADVERTISED_1000baseT_Full)
2994 all_mask |= ADVERTISE_1000FULL;
2996 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2997 return 0;
2999 if ((tg3_ctrl & all_mask) != all_mask)
3000 return 0;
3001 }
3002 return 1;
3003 }
3005 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3006 {
3007 u32 curadv, reqadv;
3009 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3010 return 1;
3012 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3013 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3015 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3016 if (curadv != reqadv)
3017 return 0;
3019 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3020 tg3_readphy(tp, MII_LPA, rmtadv);
3021 } else {
3022 /* Reprogram the advertisement register, even if it
3023 * does not affect the current link. If the link
3024 * gets renegotiated in the future, we can save an
3025 * additional renegotiation cycle by advertising
3026 * it correctly in the first place.
3027 */
3028 if (curadv != reqadv) {
3029 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3030 ADVERTISE_PAUSE_ASYM);
3031 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3032 }
3033 }
3035 return 1;
3036 }
3038 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3039 {
3040 int current_link_up;
3041 u32 bmsr, dummy;
3042 u32 lcl_adv, rmt_adv;
3043 u16 current_speed;
3044 u8 current_duplex;
3045 int i, err;
3047 tw32(MAC_EVENT, 0);
3049 tw32_f(MAC_STATUS,
3050 (MAC_STATUS_SYNC_CHANGED |
3051 MAC_STATUS_CFG_CHANGED |
3052 MAC_STATUS_MI_COMPLETION |
3053 MAC_STATUS_LNKSTATE_CHANGED));
3056 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3057 tw32_f(MAC_MI_MODE,
3058 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3059 udelay(80);
3060 }
3062 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3064 /* Some third-party PHYs need to be reset on link going
3065 * down.
3066 */
3067 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3070 netif_carrier_ok(tp->dev)) {
3071 tg3_readphy(tp, MII_BMSR, &bmsr);
3072 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3073 !(bmsr & BMSR_LSTATUS))
3074 force_reset = 1;
3075 }
3076 if (force_reset)
3077 tg3_phy_reset(tp);
3079 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3080 tg3_readphy(tp, MII_BMSR, &bmsr);
3081 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3082 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3083 bmsr = 0;
3085 if (!(bmsr & BMSR_LSTATUS)) {
3086 err = tg3_init_5401phy_dsp(tp);
3087 if (err)
3088 return err;
3090 tg3_readphy(tp, MII_BMSR, &bmsr);
3091 for (i = 0; i < 1000; i++) {
3092 udelay(10);
3093 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3094 (bmsr & BMSR_LSTATUS)) {
3095 udelay(40);
3096 break;
3097 }
3098 }
3100 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3101 !(bmsr & BMSR_LSTATUS) &&
3102 tp->link_config.active_speed == SPEED_1000) {
3103 err = tg3_phy_reset(tp);
3104 if (!err)
3105 err = tg3_init_5401phy_dsp(tp);
3106 if (err)
3107 return err;
3108 }
3109 }
3110 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3111 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3112 /* 5701 {A0,B0} CRC bug workaround */
3113 tg3_writephy(tp, 0x15, 0x0a75);
3114 tg3_writephy(tp, 0x1c, 0x8c68);
3115 tg3_writephy(tp, 0x1c, 0x8d68);
3116 tg3_writephy(tp, 0x1c, 0x8c68);
3117 }
3119 /* Clear pending interrupts... */
3120 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3121 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3123 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3124 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3125 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3126 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3129 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3130 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3131 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3132 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3134 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3135 }
3137 current_link_up = 0;
3138 current_speed = SPEED_INVALID;
3139 current_duplex = DUPLEX_INVALID;
3141 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3142 u32 val;
3144 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3145 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3146 if (!(val & (1 << 10))) {
3147 val |= (1 << 10);
3148 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3149 goto relink;
3150 }
3151 }
3153 bmsr = 0;
3154 for (i = 0; i < 100; i++) {
3155 tg3_readphy(tp, MII_BMSR, &bmsr);
3156 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3157 (bmsr & BMSR_LSTATUS))
3158 break;
3159 udelay(40);
3160 }
3162 if (bmsr & BMSR_LSTATUS) {
3163 u32 aux_stat, bmcr;
3165 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3166 for (i = 0; i < 2000; i++) {
3167 udelay(10);
3168 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3169 aux_stat)
3170 break;
3171 }
3173 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3174 &current_speed,
3175 &current_duplex);
3177 bmcr = 0;
3178 for (i = 0; i < 200; i++) {
3179 tg3_readphy(tp, MII_BMCR, &bmcr);
3180 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3181 continue;
3182 if (bmcr && bmcr != 0x7fff)
3183 break;
3184 udelay(10);
3185 }
3187 lcl_adv = 0;
3188 rmt_adv = 0;
3190 tp->link_config.active_speed = current_speed;
3191 tp->link_config.active_duplex = current_duplex;
3193 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3194 if ((bmcr & BMCR_ANENABLE) &&
3195 tg3_copper_is_advertising_all(tp,
3196 tp->link_config.advertising)) {
3197 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3198 &rmt_adv))
3199 current_link_up = 1;
3200 }
3201 } else {
3202 if (!(bmcr & BMCR_ANENABLE) &&
3203 tp->link_config.speed == current_speed &&
3204 tp->link_config.duplex == current_duplex &&
3205 tp->link_config.flowctrl ==
3206 tp->link_config.active_flowctrl) {
3207 current_link_up = 1;
3208 }
3209 }
3211 if (current_link_up == 1 &&
3212 tp->link_config.active_duplex == DUPLEX_FULL)
3213 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3214 }
3216 relink:
3217 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3218 u32 tmp;
3220 tg3_phy_copper_begin(tp);
3222 tg3_readphy(tp, MII_BMSR, &tmp);
3223 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3224 (tmp & BMSR_LSTATUS))
3225 current_link_up = 1;
3226 }
3228 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3229 if (current_link_up == 1) {
3230 if (tp->link_config.active_speed == SPEED_100 ||
3231 tp->link_config.active_speed == SPEED_10)
3232 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3233 else
3234 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3235 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3236 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3237 else
3238 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3240 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3241 if (tp->link_config.active_duplex == DUPLEX_HALF)
3242 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3244 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3245 if (current_link_up == 1 &&
3246 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3247 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3248 else
3249 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3250 }
3252 /* ??? Without this setting Netgear GA302T PHY does not
3253 * ??? send/receive packets...
3254 */
3255 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3256 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3257 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3258 tw32_f(MAC_MI_MODE, tp->mi_mode);
3259 udelay(80);
3260 }
3262 tw32_f(MAC_MODE, tp->mac_mode);
3263 udelay(40);
3265 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3266 /* Polled via timer. */
3267 tw32_f(MAC_EVENT, 0);
3268 } else {
3269 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3270 }
3271 udelay(40);
3273 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3274 current_link_up == 1 &&
3275 tp->link_config.active_speed == SPEED_1000 &&
3276 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3277 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3278 udelay(120);
3279 tw32_f(MAC_STATUS,
3280 (MAC_STATUS_SYNC_CHANGED |
3281 MAC_STATUS_CFG_CHANGED));
3282 udelay(40);
3283 tg3_write_mem(tp,
3284 NIC_SRAM_FIRMWARE_MBOX,
3285 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3286 }
3288 /* Prevent send BD corruption. */
3289 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3290 u16 oldlnkctl, newlnkctl;
3292 pci_read_config_word(tp->pdev,
3293 tp->pcie_cap + PCI_EXP_LNKCTL,
3294 &oldlnkctl);
3295 if (tp->link_config.active_speed == SPEED_100 ||
3296 tp->link_config.active_speed == SPEED_10)
3297 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3299 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3300 if (newlnkctl != oldlnkctl)
3301 pci_write_config_word(tp->pdev,
3302 tp->pcie_cap + PCI_EXP_LNKCTL,
3303 newlnkctl);
3304 }
3306 if (current_link_up != netif_carrier_ok(tp->dev)) {
3307 if (current_link_up)
3308 netif_carrier_on(tp->dev);
3310 netif_carrier_off(tp->dev);
3311 tg3_link_report(tp);
3312 }
3314 return 0;
3315 }
3317 struct tg3_fiber_aneginfo {
3318 int state;
3319 #define ANEG_STATE_UNKNOWN 0
3320 #define ANEG_STATE_AN_ENABLE 1
3321 #define ANEG_STATE_RESTART_INIT 2
3322 #define ANEG_STATE_RESTART 3
3323 #define ANEG_STATE_DISABLE_LINK_OK 4
3324 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3325 #define ANEG_STATE_ABILITY_DETECT 6
3326 #define ANEG_STATE_ACK_DETECT_INIT 7
3327 #define ANEG_STATE_ACK_DETECT 8
3328 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3329 #define ANEG_STATE_COMPLETE_ACK 10
3330 #define ANEG_STATE_IDLE_DETECT_INIT 11
3331 #define ANEG_STATE_IDLE_DETECT 12
3332 #define ANEG_STATE_LINK_OK 13
3333 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3334 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3336 u32 flags;
3337 #define MR_AN_ENABLE 0x00000001
3338 #define MR_RESTART_AN 0x00000002
3339 #define MR_AN_COMPLETE 0x00000004
3340 #define MR_PAGE_RX 0x00000008
3341 #define MR_NP_LOADED 0x00000010
3342 #define MR_TOGGLE_TX 0x00000020
3343 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3344 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3345 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3346 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3347 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3348 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3349 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3350 #define MR_TOGGLE_RX 0x00002000
3351 #define MR_NP_RX 0x00004000
3353 #define MR_LINK_OK 0x80000000
3355 unsigned long link_time, cur_time;
3357 u32 ability_match_cfg;
3358 int ability_match_count;
3360 char ability_match, idle_match, ack_match;
3362 u32 txconfig, rxconfig;
3363 #define ANEG_CFG_NP 0x00000080
3364 #define ANEG_CFG_ACK 0x00000040
3365 #define ANEG_CFG_RF2 0x00000020
3366 #define ANEG_CFG_RF1 0x00000010
3367 #define ANEG_CFG_PS2 0x00000001
3368 #define ANEG_CFG_PS1 0x00008000
3369 #define ANEG_CFG_HD 0x00004000
3370 #define ANEG_CFG_FD 0x00002000
3371 #define ANEG_CFG_INVAL 0x00001f06
3373 };
3374 #define ANEG_OK 0
3375 #define ANEG_DONE 1
3376 #define ANEG_TIMER_ENAB 2
3377 #define ANEG_FAILED -1
3379 #define ANEG_STATE_SETTLE_TIME 10000
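/* Editorial overview: for a successful 1000BASE-X negotiation the state
 * machine below walks AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK, with
 * ANEG_STATE_SETTLE_TIME enforced between the timed transitions
 * (cur_time advances once per ~1 us tick in fiber_autoneg()).
 */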
3381 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3382 struct tg3_fiber_aneginfo *ap)
3383 {
3384 u16 rx_cfg_reg;
3385 unsigned long delta;
3386 int ret;
3389 if (ap->state == ANEG_STATE_UNKNOWN) {
3390 ap->rxconfig = 0;
3391 ap->link_time = 0;
3392 ap->cur_time = 0;
3393 ap->ability_match_cfg = 0;
3394 ap->ability_match_count = 0;
3395 ap->ability_match = 0;
3396 ap->idle_match = 0;
3397 ap->ack_match = 0;
3398 }
3399 ap->cur_time++;
3401 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3402 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3404 if (rx_cfg_reg != ap->ability_match_cfg) {
3405 ap->ability_match_cfg = rx_cfg_reg;
3406 ap->ability_match = 0;
3407 ap->ability_match_count = 0;
3409 if (++ap->ability_match_count > 1) {
3410 ap->ability_match = 1;
3411 ap->ability_match_cfg = rx_cfg_reg;
3414 if (rx_cfg_reg & ANEG_CFG_ACK)
3415 ap->ack_match = 1;
3416 else
3417 ap->ack_match = 0;
3419 ap->idle_match = 0;
3420 } else {
3421 ap->idle_match = 1;
3422 ap->ability_match_cfg = 0;
3423 ap->ability_match_count = 0;
3424 ap->ability_match = 0;
3425 ap->ack_match = 0;
3427 rx_cfg_reg = 0;
3428 }
3430 ap->rxconfig = rx_cfg_reg;
3432 ret = ANEG_OK;
3433 switch (ap->state) {
3434 case ANEG_STATE_UNKNOWN:
3435 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3436 ap->state = ANEG_STATE_AN_ENABLE;
3438 /* fallthru */
3439 case ANEG_STATE_AN_ENABLE:
3440 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3441 if (ap->flags & MR_AN_ENABLE) {
3442 ap->link_time = 0;
3443 ap->cur_time = 0;
3444 ap->ability_match_cfg = 0;
3445 ap->ability_match_count = 0;
3446 ap->ability_match = 0;
3447 ap->idle_match = 0;
3448 ap->ack_match = 0;
3450 ap->state = ANEG_STATE_RESTART_INIT;
3451 } else {
3452 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3453 }
3454 break;
3456 case ANEG_STATE_RESTART_INIT:
3457 ap->link_time = ap->cur_time;
3458 ap->flags &= ~(MR_NP_LOADED);
3460 tw32(MAC_TX_AUTO_NEG, 0);
3461 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3462 tw32_f(MAC_MODE, tp->mac_mode);
3465 ret = ANEG_TIMER_ENAB;
3466 ap->state = ANEG_STATE_RESTART;
3467 break;
3469 case ANEG_STATE_RESTART:
3470 delta = ap->cur_time - ap->link_time;
3471 if (delta > ANEG_STATE_SETTLE_TIME) {
3472 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3473 } else {
3474 ret = ANEG_TIMER_ENAB;
3475 }
3476 break;
3478 case ANEG_STATE_DISABLE_LINK_OK:
3479 ret = ANEG_DONE;
3480 break;
3482 case ANEG_STATE_ABILITY_DETECT_INIT:
3483 ap->flags &= ~(MR_TOGGLE_TX);
3484 ap->txconfig = ANEG_CFG_FD;
3485 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3486 if (flowctrl & ADVERTISE_1000XPAUSE)
3487 ap->txconfig |= ANEG_CFG_PS1;
3488 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3489 ap->txconfig |= ANEG_CFG_PS2;
3490 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3491 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3492 tw32_f(MAC_MODE, tp->mac_mode);
3493 udelay(40);
3495 ap->state = ANEG_STATE_ABILITY_DETECT;
3496 break;
3498 case ANEG_STATE_ABILITY_DETECT:
3499 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3500 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3501 }
3502 break;
3504 case ANEG_STATE_ACK_DETECT_INIT:
3505 ap->txconfig |= ANEG_CFG_ACK;
3506 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3507 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3508 tw32_f(MAC_MODE, tp->mac_mode);
3509 udelay(40);
3511 ap->state = ANEG_STATE_ACK_DETECT;
3512 break;
3514 case ANEG_STATE_ACK_DETECT:
3515 if (ap->ack_match != 0) {
3516 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3517 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3518 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3519 } else {
3520 ap->state = ANEG_STATE_AN_ENABLE;
3521 }
3522 } else if (ap->ability_match != 0 &&
3523 ap->rxconfig == 0) {
3524 ap->state = ANEG_STATE_AN_ENABLE;
3525 }
3526 break;
3528 case ANEG_STATE_COMPLETE_ACK_INIT:
3529 if (ap->rxconfig & ANEG_CFG_INVAL) {
3530 ret = ANEG_FAILED;
3531 break;
3532 }
3533 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3534 MR_LP_ADV_HALF_DUPLEX |
3535 MR_LP_ADV_SYM_PAUSE |
3536 MR_LP_ADV_ASYM_PAUSE |
3537 MR_LP_ADV_REMOTE_FAULT1 |
3538 MR_LP_ADV_REMOTE_FAULT2 |
3539 MR_LP_ADV_NEXT_PAGE |
3540 MR_TOGGLE_RX |
3541 MR_NP_RX);
3542 if (ap->rxconfig & ANEG_CFG_FD)
3543 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3544 if (ap->rxconfig & ANEG_CFG_HD)
3545 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3546 if (ap->rxconfig & ANEG_CFG_PS1)
3547 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3548 if (ap->rxconfig & ANEG_CFG_PS2)
3549 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3550 if (ap->rxconfig & ANEG_CFG_RF1)
3551 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3552 if (ap->rxconfig & ANEG_CFG_RF2)
3553 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3554 if (ap->rxconfig & ANEG_CFG_NP)
3555 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3557 ap->link_time = ap->cur_time;
3559 ap->flags ^= (MR_TOGGLE_TX);
3560 if (ap->rxconfig & 0x0008)
3561 ap->flags |= MR_TOGGLE_RX;
3562 if (ap->rxconfig & ANEG_CFG_NP)
3563 ap->flags |= MR_NP_RX;
3564 ap->flags |= MR_PAGE_RX;
3566 ap->state = ANEG_STATE_COMPLETE_ACK;
3567 ret = ANEG_TIMER_ENAB;
3568 break;
3570 case ANEG_STATE_COMPLETE_ACK:
3571 if (ap->ability_match != 0 &&
3572 ap->rxconfig == 0) {
3573 ap->state = ANEG_STATE_AN_ENABLE;
3574 break;
3575 }
3576 delta = ap->cur_time - ap->link_time;
3577 if (delta > ANEG_STATE_SETTLE_TIME) {
3578 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3579 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3580 } else {
3581 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3582 !(ap->flags & MR_NP_RX)) {
3583 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3584 } else {
3585 ret = ANEG_FAILED;
3586 }
3587 }
3588 }
3589 break;
3591 case ANEG_STATE_IDLE_DETECT_INIT:
3592 ap->link_time = ap->cur_time;
3593 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3594 tw32_f(MAC_MODE, tp->mac_mode);
3597 ap->state = ANEG_STATE_IDLE_DETECT;
3598 ret = ANEG_TIMER_ENAB;
3599 break;
3601 case ANEG_STATE_IDLE_DETECT:
3602 if (ap->ability_match != 0 &&
3603 ap->rxconfig == 0) {
3604 ap->state = ANEG_STATE_AN_ENABLE;
3605 break;
3606 }
3607 delta = ap->cur_time - ap->link_time;
3608 if (delta > ANEG_STATE_SETTLE_TIME) {
3609 /* XXX another gem from the Broadcom driver :( */
3610 ap->state = ANEG_STATE_LINK_OK;
3611 }
3612 break;
3614 case ANEG_STATE_LINK_OK:
3615 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3616 ret = ANEG_DONE;
3617 break;
3619 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3620 /* ??? unimplemented */
3621 ret = ANEG_FAILED;
3622 break;
3623 case ANEG_STATE_NEXT_PAGE_WAIT:
3624 /* ??? unimplemented */
3625 ret = ANEG_FAILED;
3626 break;
3628 default:
3629 ret = ANEG_FAILED;
3630 break;
3631 }
3633 return ret;
3634 }
3635 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3636 {
3637 int res = 0;
3638 struct tg3_fiber_aneginfo aninfo;
3639 int status = ANEG_FAILED;
3640 unsigned int tick;
3641 u32 tmp;
3643 tw32_f(MAC_TX_AUTO_NEG, 0);
3645 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3646 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3649 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3652 memset(&aninfo, 0, sizeof(aninfo));
3653 aninfo.flags |= MR_AN_ENABLE;
3654 aninfo.state = ANEG_STATE_UNKNOWN;
3655 aninfo.cur_time = 0;
3656 tick = 0;
3657 while (++tick < 195000) {
3658 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3659 if (status == ANEG_DONE || status == ANEG_FAILED)
3660 break;
3662 udelay(1);
3663 }
3665 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3666 tw32_f(MAC_MODE, tp->mac_mode);
3669 *txflags = aninfo.txconfig;
3670 *rxflags = aninfo.flags;
3672 if (status == ANEG_DONE &&
3673 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3674 MR_LP_ADV_FULL_DUPLEX)))
3675 res = 1;
3677 return res;
3678 }
3680 static void tg3_init_bcm8002(struct tg3 *tp)
3681 {
3682 u32 mac_status = tr32(MAC_STATUS);
3683 int i;
3685 /* Reset when initializing the first time or when we have a link. */
3686 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3687 !(mac_status & MAC_STATUS_PCS_SYNCED))
3688 return;
3690 /* Set PLL lock range. */
3691 tg3_writephy(tp, 0x16, 0x8007);
3693 /* SW reset */
3694 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3696 /* Wait for reset to complete. */
3697 /* XXX schedule_timeout() ... */
3698 for (i = 0; i < 500; i++)
3699 udelay(10);
3701 /* Config mode; select PMA/Ch 1 regs. */
3702 tg3_writephy(tp, 0x10, 0x8411);
3704 /* Enable auto-lock and comdet, select txclk for tx. */
3705 tg3_writephy(tp, 0x11, 0x0a10);
3707 tg3_writephy(tp, 0x18, 0x00a0);
3708 tg3_writephy(tp, 0x16, 0x41ff);
3710 /* Assert and deassert POR. */
3711 tg3_writephy(tp, 0x13, 0x0400);
3712 udelay(40);
3713 tg3_writephy(tp, 0x13, 0x0000);
3715 tg3_writephy(tp, 0x11, 0x0a50);
3716 udelay(40);
3717 tg3_writephy(tp, 0x11, 0x0a10);
3719 /* Wait for signal to stabilize */
3720 /* XXX schedule_timeout() ... */
3721 for (i = 0; i < 15000; i++)
3722 udelay(10);
3724 /* Deselect the channel register so we can read the PHYID
3725 * later.
3726 */
3727 tg3_writephy(tp, 0x10, 0x8011);
3728 }
3730 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3731 {
3732 u16 flowctrl;
3733 u32 sg_dig_ctrl, sg_dig_status;
3734 u32 serdes_cfg, expected_sg_dig_ctrl;
3735 int workaround, port_a;
3736 int current_link_up;
3738 serdes_cfg = 0;
3739 expected_sg_dig_ctrl = 0;
3740 workaround = 0;
3741 port_a = 1;
3742 current_link_up = 0;
3744 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3745 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3746 workaround = 1;
3747 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3748 port_a = 0;
3750 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3751 /* preserve bits 20-23 for voltage regulator */
3752 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3753 }
3755 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3757 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3758 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3759 if (workaround) {
3760 u32 val = serdes_cfg;
3762 if (port_a)
3763 val |= 0xc010000;
3764 else
3765 val |= 0x4010000;
3766 tw32_f(MAC_SERDES_CFG, val);
3767 }
3769 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3770 }
3771 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3772 tg3_setup_flow_control(tp, 0, 0);
3773 current_link_up = 1;
3774 }
3775 goto out;
3776 }
3778 /* Want auto-negotiation. */
3779 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3781 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3782 if (flowctrl & ADVERTISE_1000XPAUSE)
3783 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3784 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3785 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3787 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3788 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3789 tp->serdes_counter &&
3790 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3791 MAC_STATUS_RCVD_CFG)) ==
3792 MAC_STATUS_PCS_SYNCED)) {
3793 tp->serdes_counter--;
3794 current_link_up = 1;
3795 goto out;
3796 }
3797 restart_autoneg:
3798 if (workaround)
3799 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3800 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3801 udelay(5);
3802 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3804 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3805 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3806 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3807 MAC_STATUS_SIGNAL_DET)) {
3808 sg_dig_status = tr32(SG_DIG_STATUS);
3809 mac_status = tr32(MAC_STATUS);
3811 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3812 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3813 u32 local_adv = 0, remote_adv = 0;
3815 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3816 local_adv |= ADVERTISE_1000XPAUSE;
3817 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3818 local_adv |= ADVERTISE_1000XPSE_ASYM;
3820 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3821 remote_adv |= LPA_1000XPAUSE;
3822 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3823 remote_adv |= LPA_1000XPAUSE_ASYM;
3825 tg3_setup_flow_control(tp, local_adv, remote_adv);
3826 current_link_up = 1;
3827 tp->serdes_counter = 0;
3828 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3829 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3830 if (tp->serdes_counter)
3831 tp->serdes_counter--;
3832 else {
3833 if (workaround) {
3834 u32 val = serdes_cfg;
3836 if (port_a)
3837 val |= 0xc010000;
3838 else
3839 val |= 0x4010000;
3841 tw32_f(MAC_SERDES_CFG, val);
3842 }
3844 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3845 udelay(40);
3847 /* Link parallel detection - link is up */
3848 /* only if we have PCS_SYNC and not */
3849 /* receiving config code words */
3850 mac_status = tr32(MAC_STATUS);
3851 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3852 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3853 tg3_setup_flow_control(tp, 0, 0);
3854 current_link_up = 1;
3855 tp->tg3_flags2 |=
3856 TG3_FLG2_PARALLEL_DETECT;
3857 tp->serdes_counter =
3858 SERDES_PARALLEL_DET_TIMEOUT;
3859 } else
3860 goto restart_autoneg;
3861 }
3862 }
3863 } else {
3864 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3865 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3866 }
3868 out:
3869 return current_link_up;
3870 }
3872 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3873 {
3874 int current_link_up = 0;
3876 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3877 goto out;
3879 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3880 u32 txflags, rxflags;
3881 int i;
3883 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3884 u32 local_adv = 0, remote_adv = 0;
3886 if (txflags & ANEG_CFG_PS1)
3887 local_adv |= ADVERTISE_1000XPAUSE;
3888 if (txflags & ANEG_CFG_PS2)
3889 local_adv |= ADVERTISE_1000XPSE_ASYM;
3891 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3892 remote_adv |= LPA_1000XPAUSE;
3893 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3894 remote_adv |= LPA_1000XPAUSE_ASYM;
3896 tg3_setup_flow_control(tp, local_adv, remote_adv);
3898 current_link_up = 1;
3899 }
3900 for (i = 0; i < 30; i++) {
3901 udelay(20);
3902 tw32_f(MAC_STATUS,
3903 (MAC_STATUS_SYNC_CHANGED |
3904 MAC_STATUS_CFG_CHANGED));
3905 udelay(40);
3906 if ((tr32(MAC_STATUS) &
3907 (MAC_STATUS_SYNC_CHANGED |
3908 MAC_STATUS_CFG_CHANGED)) == 0)
3909 break;
3910 }
3912 mac_status = tr32(MAC_STATUS);
3913 if (current_link_up == 0 &&
3914 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3915 !(mac_status & MAC_STATUS_RCVD_CFG))
3916 current_link_up = 1;
3917 } else {
3918 tg3_setup_flow_control(tp, 0, 0);
3920 /* Forcing 1000FD link up. */
3921 current_link_up = 1;
3923 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3924 udelay(40);
3926 tw32_f(MAC_MODE, tp->mac_mode);
3927 udelay(40);
3928 }
3930 out:
3931 return current_link_up;
3932 }
3934 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3935 {
3936 u32 orig_pause_cfg;
3937 u16 orig_active_speed;
3938 u8 orig_active_duplex;
3939 u32 mac_status;
3940 int current_link_up;
3941 int i;
3943 orig_pause_cfg = tp->link_config.active_flowctrl;
3944 orig_active_speed = tp->link_config.active_speed;
3945 orig_active_duplex = tp->link_config.active_duplex;
3947 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3948 netif_carrier_ok(tp->dev) &&
3949 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3950 mac_status = tr32(MAC_STATUS);
3951 mac_status &= (MAC_STATUS_PCS_SYNCED |
3952 MAC_STATUS_SIGNAL_DET |
3953 MAC_STATUS_CFG_CHANGED |
3954 MAC_STATUS_RCVD_CFG);
3955 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3956 MAC_STATUS_SIGNAL_DET)) {
3957 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3958 MAC_STATUS_CFG_CHANGED));
3959 return 0;
3960 }
3961 }
3963 tw32_f(MAC_TX_AUTO_NEG, 0);
3965 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3966 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3967 tw32_f(MAC_MODE, tp->mac_mode);
3970 if (tp->phy_id == PHY_ID_BCM8002)
3971 tg3_init_bcm8002(tp);
3973 /* Enable link change event even when serdes polling. */
3974 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3977 current_link_up = 0;
3978 mac_status = tr32(MAC_STATUS);
3980 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3981 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3983 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3985 tp->napi[0].hw_status->status =
3986 (SD_STATUS_UPDATED |
3987 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3989 for (i = 0; i < 100; i++) {
3990 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3991 MAC_STATUS_CFG_CHANGED));
3993 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3994 MAC_STATUS_CFG_CHANGED |
3995 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3996 break;
3997 }
3999 mac_status = tr32(MAC_STATUS);
4000 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4001 current_link_up = 0;
4002 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4003 tp->serdes_counter == 0) {
4004 tw32_f(MAC_MODE, (tp->mac_mode |
4005 MAC_MODE_SEND_CONFIGS));
4006 udelay(1);
4007 tw32_f(MAC_MODE, tp->mac_mode);
4008 }
4009 }
4011 if (current_link_up == 1) {
4012 tp->link_config.active_speed = SPEED_1000;
4013 tp->link_config.active_duplex = DUPLEX_FULL;
4014 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4015 LED_CTRL_LNKLED_OVERRIDE |
4016 LED_CTRL_1000MBPS_ON));
4017 } else {
4018 tp->link_config.active_speed = SPEED_INVALID;
4019 tp->link_config.active_duplex = DUPLEX_INVALID;
4020 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4021 LED_CTRL_LNKLED_OVERRIDE |
4022 LED_CTRL_TRAFFIC_OVERRIDE));
4023 }
4025 if (current_link_up != netif_carrier_ok(tp->dev)) {
4026 if (current_link_up)
4027 netif_carrier_on(tp->dev);
4029 netif_carrier_off(tp->dev);
4030 tg3_link_report(tp);
4032 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4033 if (orig_pause_cfg != now_pause_cfg ||
4034 orig_active_speed != tp->link_config.active_speed ||
4035 orig_active_duplex != tp->link_config.active_duplex)
4036 tg3_link_report(tp);
4037 }
4039 return 0;
4040 }
4042 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4043 {
4044 int current_link_up, err = 0;
4045 u32 bmsr, bmcr;
4046 u16 current_speed;
4047 u8 current_duplex;
4048 u32 local_adv, remote_adv;
4050 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4051 tw32_f(MAC_MODE, tp->mac_mode);
4052 udelay(40);
4054 tw32(MAC_EVENT, 0);
4056 tw32_f(MAC_STATUS,
4057 (MAC_STATUS_SYNC_CHANGED |
4058 MAC_STATUS_CFG_CHANGED |
4059 MAC_STATUS_MI_COMPLETION |
4060 MAC_STATUS_LNKSTATE_CHANGED));
4061 udelay(40);
4063 if (force_reset)
4064 tg3_phy_reset(tp);
4066 current_link_up = 0;
4067 current_speed = SPEED_INVALID;
4068 current_duplex = DUPLEX_INVALID;
4070 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4071 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4073 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4074 bmsr |= BMSR_LSTATUS;
4076 bmsr &= ~BMSR_LSTATUS;
4077 }
4079 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4081 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4082 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4083 /* do nothing, just check for link up at the end */
4084 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4085 u32 adv, new_adv;
4087 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4088 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4089 ADVERTISE_1000XPAUSE |
4090 ADVERTISE_1000XPSE_ASYM |
4091 ADVERTISE_SLCT);
4093 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4095 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4096 new_adv |= ADVERTISE_1000XHALF;
4097 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4098 new_adv |= ADVERTISE_1000XFULL;
4100 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4101 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4102 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4103 tg3_writephy(tp, MII_BMCR, bmcr);
4105 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4106 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4107 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4109 return err;
4110 }
4111 } else {
4112 u32 new_bmcr;
4114 bmcr &= ~BMCR_SPEED1000;
4115 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4117 if (tp->link_config.duplex == DUPLEX_FULL)
4118 new_bmcr |= BMCR_FULLDPLX;
4120 if (new_bmcr != bmcr) {
4121 /* BMCR_SPEED1000 is a reserved bit that needs
4122 * to be set on write.
4124 new_bmcr |= BMCR_SPEED1000;
4126 /* Force a linkdown */
4127 if (netif_carrier_ok(tp->dev)) {
4128 u32 adv;
4130 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4131 adv &= ~(ADVERTISE_1000XFULL |
4132 ADVERTISE_1000XHALF |
4133 ADVERTISE_SLCT);
4134 tg3_writephy(tp, MII_ADVERTISE, adv);
4135 tg3_writephy(tp, MII_BMCR, bmcr |
4136 BMCR_ANRESTART |
4137 BMCR_ANENABLE);
4138 udelay(10);
4139 netif_carrier_off(tp->dev);
4140 }
4141 tg3_writephy(tp, MII_BMCR, new_bmcr);
4142 bmcr = new_bmcr;
4143 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4144 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4145 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4146 ASIC_REV_5714) {
4147 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4148 bmsr |= BMSR_LSTATUS;
4150 bmsr &= ~BMSR_LSTATUS;
4151 }
4152 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4153 }
4154 }
4156 if (bmsr & BMSR_LSTATUS) {
4157 current_speed = SPEED_1000;
4158 current_link_up = 1;
4159 if (bmcr & BMCR_FULLDPLX)
4160 current_duplex = DUPLEX_FULL;
4162 current_duplex = DUPLEX_HALF;
4164 local_adv = 0;
4165 remote_adv = 0;
4167 if (bmcr & BMCR_ANENABLE) {
4168 u32 common;
4170 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4171 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4172 common = local_adv & remote_adv;
4173 if (common & (ADVERTISE_1000XHALF |
4174 ADVERTISE_1000XFULL)) {
4175 if (common & ADVERTISE_1000XFULL)
4176 current_duplex = DUPLEX_FULL;
4178 current_duplex = DUPLEX_HALF;
4179 }
4180 else
4181 current_link_up = 0;
4182 }
4183 }
4185 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4186 tg3_setup_flow_control(tp, local_adv, remote_adv);
4188 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4189 if (tp->link_config.active_duplex == DUPLEX_HALF)
4190 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4192 tw32_f(MAC_MODE, tp->mac_mode);
4195 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4197 tp->link_config.active_speed = current_speed;
4198 tp->link_config.active_duplex = current_duplex;
4200 if (current_link_up != netif_carrier_ok(tp->dev)) {
4201 if (current_link_up)
4202 netif_carrier_on(tp->dev);
4204 netif_carrier_off(tp->dev);
4205 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4206 }
4207 tg3_link_report(tp);
4208 }
4210 return err;
4211 }
4212 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4213 {
4214 if (tp->serdes_counter) {
4215 /* Give autoneg time to complete. */
4216 tp->serdes_counter--;
4217 return;
4218 }
4219 if (!netif_carrier_ok(tp->dev) &&
4220 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4221 u32 bmcr;
4223 tg3_readphy(tp, MII_BMCR, &bmcr);
4224 if (bmcr & BMCR_ANENABLE) {
4225 u32 phy1, phy2;
4227 /* Select shadow register 0x1f */
4228 tg3_writephy(tp, 0x1c, 0x7c00);
4229 tg3_readphy(tp, 0x1c, &phy1);
4231 /* Select expansion interrupt status register */
4232 tg3_writephy(tp, 0x17, 0x0f01);
4233 tg3_readphy(tp, 0x15, &phy2);
4234 tg3_readphy(tp, 0x15, &phy2);
4236 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4237 /* We have signal detect and not receiving
4238 * config code words, link is up by parallel
4239 * detection.
4240 */
4242 bmcr &= ~BMCR_ANENABLE;
4243 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4244 tg3_writephy(tp, MII_BMCR, bmcr);
4245 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4246 }
4247 }
4248 }
4249 else if (netif_carrier_ok(tp->dev) &&
4250 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4251 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4252 u32 phy2;
4254 /* Select expansion interrupt status register */
4255 tg3_writephy(tp, 0x17, 0x0f01);
4256 tg3_readphy(tp, 0x15, &phy2);
4257 if (phy2 & 0x20) {
4258 u32 bmcr;
4260 /* Config code words received, turn on autoneg. */
4261 tg3_readphy(tp, MII_BMCR, &bmcr);
4262 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4264 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4266 }
4267 }
4268 }
4270 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4271 {
4272 int err;
4274 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4275 err = tg3_setup_fiber_phy(tp, force_reset);
4276 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4277 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4279 err = tg3_setup_copper_phy(tp, force_reset);
4280 }
4282 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4283 u32 val, scale;
4285 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4286 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4287 scale = 65;
4288 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4289 scale = 6;
4290 else
4291 scale = 12;
4293 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4294 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4295 tw32(GRC_MISC_CFG, val);
4296 }
4298 if (tp->link_config.active_speed == SPEED_1000 &&
4299 tp->link_config.active_duplex == DUPLEX_HALF)
4300 tw32(MAC_TX_LENGTHS,
4301 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4302 (6 << TX_LENGTHS_IPG_SHIFT) |
4303 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4304 else
4305 tw32(MAC_TX_LENGTHS,
4306 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4307 (6 << TX_LENGTHS_IPG_SHIFT) |
4308 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4310 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4311 if (netif_carrier_ok(tp->dev)) {
4312 tw32(HOSTCC_STAT_COAL_TICKS,
4313 tp->coal.stats_block_coalesce_usecs);
4314 } else {
4315 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4316 }
4317 }
4319 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4320 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4321 if (!netif_carrier_ok(tp->dev))
4322 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4323 tp->pwrmgmt_thresh;
4324 else
4325 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4326 tw32(PCIE_PWR_MGMT_THRESH, val);
4327 }
4329 return err;
4330 }
4332 /* This is called whenever we suspect that the system chipset is re-
4333 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4334 * is bogus tx completions. We try to recover by setting the
4335 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4336 * in the workqueue.
4337 */
4338 static void tg3_tx_recover(struct tg3 *tp)
4339 {
4340 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4341 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4343 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4344 "mapped I/O cycles to the network device, attempting to "
4345 "recover. Please report the problem to the driver maintainer "
4346 "and include system chipset information.\n", tp->dev->name);
4348 spin_lock(&tp->lock);
4349 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4350 spin_unlock(&tp->lock);
4351 }
4353 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4354 {
4355 smp_mb();
4356 return tnapi->tx_pending -
4357 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4358 }
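/* Worked example (editorial): with TG3_TX_RING_SIZE == 512, tx_prod == 10
 * and tx_cons == 508, the in-flight count is (10 - 508) & 511 == 14, so
 * tg3_tx_avail() reports tx_pending - 14 free descriptors.  The mask
 * works because the ring size is a power of two and the indices are
 * free-running.
 */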
4360 /* Tigon3 never reports partial packet sends. So we do not
4361 * need special logic to handle SKBs that have not had all
4362 * of their frags sent yet, like SunGEM does.
4363 */
4364 static void tg3_tx(struct tg3_napi *tnapi)
4365 {
4366 struct tg3 *tp = tnapi->tp;
4367 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4368 u32 sw_idx = tnapi->tx_cons;
4369 struct netdev_queue *txq;
4370 int index = tnapi - tp->napi;
4372 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4373 index--;
4375 txq = netdev_get_tx_queue(tp->dev, index);
4377 while (sw_idx != hw_idx) {
4378 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4379 struct sk_buff *skb = ri->skb;
4380 int i, tx_bug = 0;
4382 if (unlikely(skb == NULL)) {
4383 tg3_tx_recover(tp);
4384 return;
4385 }
4387 pci_unmap_single(tp->pdev,
4388 pci_unmap_addr(ri, mapping),
4389 skb_headlen(skb),
4390 PCI_DMA_TODEVICE);
4392 ri->skb = NULL;
4394 sw_idx = NEXT_TX(sw_idx);
4396 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4397 ri = &tnapi->tx_buffers[sw_idx];
4398 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4399 tx_bug = 1;
4401 pci_unmap_page(tp->pdev,
4402 pci_unmap_addr(ri, mapping),
4403 skb_shinfo(skb)->frags[i].size,
4404 PCI_DMA_TODEVICE);
4405 sw_idx = NEXT_TX(sw_idx);
4406 }
4408 dev_kfree_skb(skb);
4410 if (unlikely(tx_bug)) {
4411 tg3_tx_recover(tp);
4412 return;
4413 }
4414 }
4416 tnapi->tx_cons = sw_idx;
4418 /* Need to make the tx_cons update visible to tg3_start_xmit()
4419 * before checking for netif_queue_stopped(). Without the
4420 * memory barrier, there is a small possibility that tg3_start_xmit()
4421 * will miss it and cause the queue to be stopped forever.
4422 */
4423 smp_mb();
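/* Editorial note: this smp_mb() is intended to pair with the barrier
 * taken on the tg3_start_xmit() side (via tg3_tx_avail()) between its
 * queue-stop and its re-check of the ring.  With the pair in place,
 * either the producer sees the new tx_cons or the consumer sees the
 * stopped queue; without it, both sides could read stale values and the
 * queue could stall permanently.
 */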
4425 if (unlikely(netif_tx_queue_stopped(txq) &&
4426 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4427 __netif_tx_lock(txq, smp_processor_id());
4428 if (netif_tx_queue_stopped(txq) &&
4429 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4430 netif_tx_wake_queue(txq);
4431 __netif_tx_unlock(txq);
4432 }
4433 }
4435 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4436 {
4437 if (!ri->skb)
4438 return;
4440 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4441 map_sz, PCI_DMA_FROMDEVICE);
4442 dev_kfree_skb_any(ri->skb);
4443 ri->skb = NULL;
4444 }
4446 /* Returns size of skb allocated or < 0 on error.
4448 * We only need to fill in the address because the other members
4449 * of the RX descriptor are invariant, see tg3_init_rings.
4451 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4452 * posting buffers we only dirty the first cache line of the RX
4453 * descriptor (containing the address). Whereas for the RX status
4454 * buffers the cpu only reads the last cacheline of the RX descriptor
4455 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4456 */
4457 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4458 u32 opaque_key, u32 dest_idx_unmasked)
4459 {
4460 struct tg3_rx_buffer_desc *desc;
4461 struct ring_info *map, *src_map;
4462 struct sk_buff *skb;
4463 dma_addr_t mapping;
4464 int skb_size, dest_idx;
4466 src_map = NULL;
4467 switch (opaque_key) {
4468 case RXD_OPAQUE_RING_STD:
4469 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4470 desc = &tpr->rx_std[dest_idx];
4471 map = &tpr->rx_std_buffers[dest_idx];
4472 skb_size = tp->rx_pkt_map_sz;
4473 break;
4475 case RXD_OPAQUE_RING_JUMBO:
4476 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4477 desc = &tpr->rx_jmb[dest_idx].std;
4478 map = &tpr->rx_jmb_buffers[dest_idx];
4479 skb_size = TG3_RX_JMB_MAP_SZ;
4480 break;
4482 default:
4483 return -EINVAL;
4484 }
4486 /* Do not overwrite any of the map or rp information
4487 * until we are sure we can commit to a new buffer.
4489 * Callers depend upon this behavior and assume that
4490 * we leave everything unchanged if we fail.
4491 */
4492 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4493 if (skb == NULL)
4494 return -ENOMEM;
4496 skb_reserve(skb, tp->rx_offset);
4498 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4499 PCI_DMA_FROMDEVICE);
4500 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4501 dev_kfree_skb(skb);
4502 return -EIO;
4503 }
4505 map->skb = skb;
4506 pci_unmap_addr_set(map, mapping, mapping);
4508 desc->addr_hi = ((u64)mapping >> 32);
4509 desc->addr_lo = ((u64)mapping & 0xffffffff);
4511 return skb_size;
4512 }
4514 /* We only need to move over in the address because the other
4515 * members of the RX descriptor are invariant. See notes above
4516 * tg3_alloc_rx_skb for full details.
4517 */
4518 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4519 struct tg3_rx_prodring_set *dpr,
4520 u32 opaque_key, int src_idx,
4521 u32 dest_idx_unmasked)
4522 {
4523 struct tg3 *tp = tnapi->tp;
4524 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4525 struct ring_info *src_map, *dest_map;
4526 int dest_idx;
4527 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4529 switch (opaque_key) {
4530 case RXD_OPAQUE_RING_STD:
4531 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4532 dest_desc = &dpr->rx_std[dest_idx];
4533 dest_map = &dpr->rx_std_buffers[dest_idx];
4534 src_desc = &spr->rx_std[src_idx];
4535 src_map = &spr->rx_std_buffers[src_idx];
4536 break;
4538 case RXD_OPAQUE_RING_JUMBO:
4539 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4540 dest_desc = &dpr->rx_jmb[dest_idx].std;
4541 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4542 src_desc = &spr->rx_jmb[src_idx].std;
4543 src_map = &spr->rx_jmb_buffers[src_idx];
4544 break;
4546 default:
4547 return;
4548 }
4550 dest_map->skb = src_map->skb;
4551 pci_unmap_addr_set(dest_map, mapping,
4552 pci_unmap_addr(src_map, mapping));
4553 dest_desc->addr_hi = src_desc->addr_hi;
4554 dest_desc->addr_lo = src_desc->addr_lo;
4556 /* Ensure that the update to the skb happens after the physical
4557 * addresses have been transferred to the new BD location.
4558 */
4559 smp_wmb();
4561 src_map->skb = NULL;
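/* Editorial sketch (illustrative only, never compiled): the smp_wmb()
 * above pairs with the smp_rmb() calls in tg3_rx_prodring_xfer() below.
 * The writer publishes the descriptor before releasing the slot; the
 * reader checks the slot before trusting the descriptor.
 */
#if 0
	/* writer (tg3_recycle_rx) */
	dest_desc->addr_lo = src_desc->addr_lo;
	smp_wmb();
	src_map->skb = NULL;			/* release the source slot */

	/* reader (tg3_rx_prodring_xfer) */
	if (!dpr->rx_std_buffers[i].skb) {	/* slot observed free? */
		smp_rmb();
		/* descriptor contents are now safe to copy */
	}
#endif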
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * fields, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
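/* Editorial sketch (illustrative only, never compiled; the walk happens
 * in chip firmware, not in this driver, and the names here are
 * hypothetical): the MAXLEN-based ring selection described above.
 */
#if 0
	for (i = 0; i < num_post_rings; i++) {
		if (pkt_len <= bdinfo[i].maxlen) {
			ring = i;	/* first BDINFO that fits wins */
			break;
		}
	}
#endif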
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > RX_COPY_THRESHOLD &&
		    tp->rx_offset == NET_IP_ALIGN) {
			/* rx_offset will likely not equal NET_IP_ALIGN
			 * if this is a 5701 card running in PCI-X mode
			 * [see tg3_get_invariants()]
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			ri->skb = NULL;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev,
						    len + TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}

#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			vlan_gro_receive(&tnapi->napi, tp->vlgrp,
					 desc->err_vlan & RXD_VLAN_MASK, skb);
		} else
#endif
			napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx %
					       TG3_RX_JUMBO_RING_SIZE;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
static void tg3_rx_prodring_xfer(struct tg3 *tp,
				 struct tg3_rx_prodring_set *dpr,
				 struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;

		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
		int i;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			tg3_rx_prodring_xfer(tp, dpr, tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();
	}

	return work_done;
}
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_restart_ints() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
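/* Typical caller pattern (editorial sketch, never compiled;
 * tg3_reset_task() below is a real user).  Passing irq_sync = 1
 * additionally waits for any in-flight interrupt handlers.
 */
#if 0
	tg3_full_lock(tp, 1);
	/* ... halt and re-initialize the hardware here ... */
	tg3_full_unlock(tp);
#endif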
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so the driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
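/* Editorial sketch (illustrative only, never compiled): the intr-mbox-0
 * handshake used by the handlers above and by the poll loop's re-enable
 * path.
 */
#if 0
	/* ISR: a non-zero write masks chip IRQs for the handler's duration */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/* NAPI poll, once all work is done: a zero write (or, with tagged
	 * status, last_tag << 24) re-enables interrupts */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
#endif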
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* dev_id must be the tg3_napi vector, as the handlers above expect */
	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
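/* Worked example (editorial sketch, never compiled): a 2KB buffer
 * mapped just below the 4GB line trips the test above.
 */
#if 0
	dma_addr_t mapping = 0xfffff800;	/* last 2KB below 4G */
	int len = 0x800;

	/* base = 0xfffff800 > 0xffffdcc0, and base + len + 8 wraps the
	 * 32-bit sum around to 0x8 < base, so the test returns true and
	 * the buffer must be bounced (see tigon3_dma_hwbug_workaround
	 * below).
	 */
	BUG_ON(!tg3_4g_overflow_test(mapping, len));
#endif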
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_BIT_MASK(40));
	return 0;
#else
	return 0;
#endif
}
static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);

/* Work around 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
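/* Usage sketch (editorial, never compiled; map0/len0 and friends are
 * hypothetical): queueing a two-buffer packet.  Only the final call
 * passes is_end = 1 in the low bit, so only the last descriptor gets
 * TXD_FLAG_END.
 */
#if 0
	tg3_set_txd(tnapi, entry, map0, len0, base_flags, 0 | (mss << 1));
	entry = NEXT_TX(entry);
	tg3_set_txd(tnapi, entry, map1, len1, base_flags, 1 | (mss << 1));
	entry = NEXT_TX(entry);
#endif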
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;
		u32 hdrlen;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}

		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	len = skb_headlen(skb);

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	tnapi->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > ETH_DATA_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

			tnapi->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);

			tg3_set_txd(tnapi, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;

dma_error:
	/* Unwind the TX mappings made above. */
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       pci_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
					  struct net_device *);

/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int i, last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		u32 tcp_opt_len, ip_tcp_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdr_len & 0xc) << 12;