/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.108"
#define DRV_MODULE_RELDATE	"February 17, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
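
/* Worked example of the shift/mask point above (illustrative comment,
 * not part of the original source): because TG3_TX_RING_SIZE is a
 * compile-time power of two, GCC compiles 'idx % TG3_TX_RING_SIZE'
 * down to 'idx & 511', so NEXT_TX(511) wraps to 0 without a hardware
 * divide or modulo instruction.
 */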

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS	(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST	6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
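
/* Usage sketch (illustrative comment, not part of the original source):
 * the debug value is a NETIF_MSG_* bitmap from <linux/netdevice.h>, e.g.
 *
 *	modprobe tg3 tg3_debug=0x7	# NETIF_MSG_DRV | PROBE | LINK
 *
 * while the default of -1 keeps TG3_DEF_MSG_ENABLE.
 */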

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
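
/* Illustrative note (not part of the original source): these macros
 * dispatch through function pointers in struct tg3, which the probe
 * code points at the direct MMIO helpers (tg3_write32/tg3_read32) on
 * healthy chips, or at the indirect PCI config-space helpers when a
 * hardware bug forces non-posted accesses, conceptually:
 *
 *	tp->write32 = tg3_write_indirect_reg32;	(PCI-X target hwbug)
 *	tp->write32 = tg3_write32;		(normal case)
 */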

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
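
/* Usage sketch (illustrative comment, not part of the original source):
 * tg3_write_mem()/tg3_read_mem() give word access to NIC SRAM through
 * the 32-bit memory window, e.g. the firmware mailbox traffic later in
 * this file:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &val);
 *
 * The window base is parked at zero afterwards so stray accesses
 * through the window hit a known location.
 */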

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
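
/* Usage sketch (illustrative comment, not part of the original source):
 * callers bracket access to resources shared with the APE firmware:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * tg3_ape_lock() returns 0 on grant (or when the APE is disabled) and
 * -EBUSY if the grant does not arrive within ~1 ms.
 */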

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
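
/* Usage sketch (illustrative comment, not part of the original source):
 * both helpers return 0 on success, so the usual read-modify-write of a
 * PHY register looks like
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 *		tg3_writephy(tp, MII_TG3_EXT_CTRL, val | some_bit);
 *
 * ('some_bit' is a placeholder); tg3_phy_reset() below uses exactly
 * this pattern for the FIFO elasticity bit.
 */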

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
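
/* Mapping summary (illustrative comment, not part of the original
 * source):
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX  ->  PAUSE_CAP (symmetric pause)
 *	FLOW_CTRL_TX only            ->  PAUSE_ASYM
 *	FLOW_CTRL_RX only            ->  PAUSE_CAP | PAUSE_ASYM
 *
 * following the 802.3 pause resolution rules; the 1000T and 1000X
 * variants differ only in which advertisement bits carry the pair.
 */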

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
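
/* Worked example (illustrative comment, not part of the original
 * source): lcladv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM with
 * rmtadv = LPA_1000XPAUSE_ASYM resolves to FLOW_CTRL_RX, while
 * rmtadv = LPA_1000XPAUSE resolves to FLOW_CTRL_TX | FLOW_CTRL_RX;
 * this is the standard 802.3 pause resolution truth table.
 */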

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
2168 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2172 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2174 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2175 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2178 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2179 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2180 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2187 val = tr32(GRC_MISC_CFG);
2188 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2191 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2192 u32 phytest;
2193 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2194 u32 phy;
2196 tg3_writephy(tp, MII_ADVERTISE, 0);
2197 tg3_writephy(tp, MII_BMCR,
2198 BMCR_ANENABLE | BMCR_ANRESTART);
2200 tg3_writephy(tp, MII_TG3_FET_TEST,
2201 phytest | MII_TG3_FET_SHADOW_EN);
2202 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2203 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2204 tg3_writephy(tp,
2205 MII_TG3_FET_SHDW_AUXMODE4,
2206 phy);
2207 }
2208 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2209 }
2210 return;
2211 } else if (do_low_power) {
2212 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2213 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2215 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2216 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2217 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2218 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2219 MII_TG3_AUXCTL_PCTL_VREG_11V);
2222 /* The PHY should not be powered down on some chips because
2223 * of bugs.
2224 */
2225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2227 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2228 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2229 return;
2231 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2232 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2233 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2234 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2235 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2236 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2237 }
2239 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2240 }
2242 /* tp->lock is held. */
2243 static int tg3_nvram_lock(struct tg3 *tp)
2245 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2248 if (tp->nvram_lock_cnt == 0) {
2249 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2250 for (i = 0; i < 8000; i++) {
2251 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2252 break;
2253 udelay(20);
2254 }
2255 if (i == 8000) {
2256 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2257 return -ENODEV;
2258 }
2259 }
2260 tp->nvram_lock_cnt++;
2261 }
2262 return 0;
2263 }
2265 /* tp->lock is held. */
2266 static void tg3_nvram_unlock(struct tg3 *tp)
2268 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2269 if (tp->nvram_lock_cnt > 0)
2270 tp->nvram_lock_cnt--;
2271 if (tp->nvram_lock_cnt == 0)
2272 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2273 }
2274 }
2276 /* tp->lock is held. */
2277 static void tg3_enable_nvram_access(struct tg3 *tp)
2279 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2280 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2281 u32 nvaccess = tr32(NVRAM_ACCESS);
2283 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2284 }
2285 }
2287 /* tp->lock is held. */
2288 static void tg3_disable_nvram_access(struct tg3 *tp)
2290 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2291 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2292 u32 nvaccess = tr32(NVRAM_ACCESS);
2294 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2295 }
2296 }
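/* Read one 32-bit word from the legacy serial EEPROM through the GRC
 * EEPROM address/data registers.  The offset must be word aligned.
 */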
2298 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2299 u32 offset, u32 *val)
2300 {
2301 u32 tmp;
2302 int i;
2304 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2305 return -EINVAL;
2307 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2308 EEPROM_ADDR_DEVID_MASK |
2309 EEPROM_ADDR_READ);
2310 tw32(GRC_EEPROM_ADDR,
2311 tmp |
2312 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2313 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2314 EEPROM_ADDR_ADDR_MASK) |
2315 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2317 for (i = 0; i < 1000; i++) {
2318 tmp = tr32(GRC_EEPROM_ADDR);
2320 if (tmp & EEPROM_ADDR_COMPLETE)
2321 break;
2322 msleep(1);
2323 }
2324 if (!(tmp & EEPROM_ADDR_COMPLETE))
2325 return -EBUSY;
2327 tmp = tr32(GRC_EEPROM_DATA);
2329 /*
2330 * The data will always be opposite the native endian
2331 * format.  Perform a blind byteswap to compensate.
2332 */
2333 *val = swab32(tmp);
2335 return 0;
2336 }
2338 #define NVRAM_CMD_TIMEOUT 10000
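/* Kick off an NVRAM command and poll up to NVRAM_CMD_TIMEOUT times
 * for the controller to report completion.
 */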
2340 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2344 tw32(NVRAM_CMD, nvram_cmd);
2345 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2346 udelay(10);
2347 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2348 udelay(10);
2349 break;
2350 }
2351 }
2353 if (i == NVRAM_CMD_TIMEOUT)
2354 return -EBUSY;
2356 return 0;
2357 }
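/* Convert a flat NVRAM offset into the page-based physical address
 * used by Atmel AT45DB0x1B parts, whose page size is not a power of two.
 */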
2359 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2361 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2362 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2363 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2364 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2365 (tp->nvram_jedecnum == JEDEC_ATMEL))
2367 addr = ((addr / tp->nvram_pagesize) <<
2368 ATMEL_AT45DB0X1B_PAGE_POS) +
2369 (addr % tp->nvram_pagesize);
2371 return addr;
2372 }
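/* Inverse of tg3_nvram_phys_addr(): fold an Atmel page/offset physical
 * address back into a flat logical offset.
 */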
2374 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2376 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2377 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2378 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2379 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2380 (tp->nvram_jedecnum == JEDEC_ATMEL))
2382 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2383 tp->nvram_pagesize) +
2384 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2386 return addr;
2387 }
2389 /* NOTE: Data read in from NVRAM is byteswapped according to
2390 * the byteswapping settings for all other register accesses.
2391 * tg3 devices are BE devices, so on a BE machine, the data
2392 * returned will be exactly as it is seen in NVRAM. On a LE
2393 * machine, the 32-bit value will be byteswapped.
2394 */
2395 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2396 {
2397 int ret;
2399 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2400 return tg3_nvram_read_using_eeprom(tp, offset, val);
2402 offset = tg3_nvram_phys_addr(tp, offset);
2404 if (offset > NVRAM_ADDR_MSK)
2405 return -EINVAL;
2407 ret = tg3_nvram_lock(tp);
2408 if (ret)
2409 return ret;
2411 tg3_enable_nvram_access(tp);
2413 tw32(NVRAM_ADDR, offset);
2414 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2415 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2417 if (ret == 0)
2418 *val = tr32(NVRAM_RDDATA);
2420 tg3_disable_nvram_access(tp);
2422 tg3_nvram_unlock(tp);
2424 return ret;
2425 }
2427 /* Ensures NVRAM data is in bytestream format. */
2428 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2430 u32 v;
2431 int res = tg3_nvram_read(tp, offset, &v);
2432 if (!res)
2433 *val = cpu_to_be32(v);
2434 return res;
2435 }
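/* Example (a sketch, not driver code): callers that parse multi-word
 * NVRAM structures typically loop over word-aligned offsets, e.g.
 *
 *	__be32 buf[4];
 *	int i, err = 0;
 *	for (i = 0; i < 4; i++)
 *		err |= tg3_nvram_read_be32(tp, base + 4 * i, &buf[i]);
 *
 * where 'base' is a hypothetical word-aligned starting offset.  Because
 * the result is in bytestream (big endian) order, buf[] can be treated
 * as raw bytes regardless of host endianness.
 */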
2437 /* tp->lock is held. */
2438 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2439 {
2440 u32 addr_high, addr_low;
2441 int i;
2443 addr_high = ((tp->dev->dev_addr[0] << 8) |
2444 tp->dev->dev_addr[1]);
2445 addr_low = ((tp->dev->dev_addr[2] << 24) |
2446 (tp->dev->dev_addr[3] << 16) |
2447 (tp->dev->dev_addr[4] << 8) |
2448 (tp->dev->dev_addr[5] << 0));
2449 for (i = 0; i < 4; i++) {
2450 if (i == 1 && skip_mac_1)
2451 continue;
2452 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2453 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2454 }
2456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2458 for (i = 0; i < 12; i++) {
2459 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2460 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2461 }
2462 }
2464 addr_high = (tp->dev->dev_addr[0] +
2465 tp->dev->dev_addr[1] +
2466 tp->dev->dev_addr[2] +
2467 tp->dev->dev_addr[3] +
2468 tp->dev->dev_addr[4] +
2469 tp->dev->dev_addr[5]) &
2470 TX_BACKOFF_SEED_MASK;
2471 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2472 }
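/* Move the device to the requested PCI power state, programming wake
 * on LAN, PHY power and core clock gating to match.
 */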
2474 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2477 bool device_should_wake, do_low_power;
2479 /* Make sure register accesses (indirect or otherwise)
2480 * will function correctly.
2482 pci_write_config_dword(tp->pdev,
2483 TG3PCI_MISC_HOST_CTRL,
2484 tp->misc_host_ctrl);
2486 switch (state) {
2487 case PCI_D0:
2488 pci_enable_wake(tp->pdev, state, false);
2489 pci_set_power_state(tp->pdev, PCI_D0);
2491 /* Switch out of Vaux if it is a NIC */
2492 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2493 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2495 return 0;
2497 case PCI_D1:
2498 case PCI_D2:
2499 case PCI_D3hot:
2500 break;
2502 default:
2503 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2504 tp->dev->name, state);
2505 return -EINVAL;
2506 }
2508 /* Restore the CLKREQ setting. */
2509 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2512 pci_read_config_word(tp->pdev,
2513 tp->pcie_cap + PCI_EXP_LNKCTL,
2515 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2516 pci_write_config_word(tp->pdev,
2517 tp->pcie_cap + PCI_EXP_LNKCTL,
2521 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2522 tw32(TG3PCI_MISC_HOST_CTRL,
2523 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2525 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2526 device_may_wakeup(&tp->pdev->dev) &&
2527 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2529 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2530 do_low_power = false;
2531 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2532 !tp->link_config.phy_is_low_power) {
2533 struct phy_device *phydev;
2534 u32 phyid, advertising;
2536 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2538 tp->link_config.phy_is_low_power = 1;
2540 tp->link_config.orig_speed = phydev->speed;
2541 tp->link_config.orig_duplex = phydev->duplex;
2542 tp->link_config.orig_autoneg = phydev->autoneg;
2543 tp->link_config.orig_advertising = phydev->advertising;
2545 advertising = ADVERTISED_TP |
2547 ADVERTISED_Autoneg |
2548 ADVERTISED_10baseT_Half;
2550 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2551 device_should_wake) {
2552 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2553 advertising |=
2554 ADVERTISED_100baseT_Half |
2555 ADVERTISED_100baseT_Full |
2556 ADVERTISED_10baseT_Full;
2557 else
2558 advertising |= ADVERTISED_10baseT_Full;
2559 }
2561 phydev->advertising = advertising;
2563 phy_start_aneg(phydev);
2565 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2566 if (phyid != PHY_ID_BCMAC131) {
2567 phyid &= PHY_BCM_OUI_MASK;
2568 if (phyid == PHY_BCM_OUI_1 ||
2569 phyid == PHY_BCM_OUI_2 ||
2570 phyid == PHY_BCM_OUI_3)
2571 do_low_power = true;
2575 do_low_power = true;
2577 if (tp->link_config.phy_is_low_power == 0) {
2578 tp->link_config.phy_is_low_power = 1;
2579 tp->link_config.orig_speed = tp->link_config.speed;
2580 tp->link_config.orig_duplex = tp->link_config.duplex;
2581 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2584 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2585 tp->link_config.speed = SPEED_10;
2586 tp->link_config.duplex = DUPLEX_HALF;
2587 tp->link_config.autoneg = AUTONEG_ENABLE;
2588 tg3_setup_phy(tp, 0);
2592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2595 val = tr32(GRC_VCPU_EXT_CTRL);
2596 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2597 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2598 int i;
2599 u32 val;
2601 for (i = 0; i < 200; i++) {
2602 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2603 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2604 break;
2605 msleep(1);
2606 }
2607 }
2608 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2609 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2610 WOL_DRV_STATE_SHUTDOWN |
2611 WOL_DRV_WOL |
2612 WOL_SET_MAGIC_PKT);
2614 if (device_should_wake) {
2617 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2618 if (do_low_power) {
2619 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2620 udelay(40);
2621 }
2623 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2624 mac_mode = MAC_MODE_PORT_MODE_GMII;
2626 mac_mode = MAC_MODE_PORT_MODE_MII;
2628 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2629 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2631 u32 speed = (tp->tg3_flags &
2632 TG3_FLAG_WOL_SPEED_100MB) ?
2633 SPEED_100 : SPEED_10;
2634 if (tg3_5700_link_polarity(tp, speed))
2635 mac_mode |= MAC_MODE_LINK_POLARITY;
2637 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2640 mac_mode = MAC_MODE_PORT_MODE_TBI;
2643 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2644 tw32(MAC_LED_CTRL, tp->led_ctrl);
2646 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2647 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2648 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2649 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2650 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2651 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2653 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2654 mac_mode |= tp->mac_mode &
2655 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2656 if (mac_mode & MAC_MODE_APE_TX_EN)
2657 mac_mode |= MAC_MODE_TDE_ENABLE;
2660 tw32_f(MAC_MODE, mac_mode);
2663 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2667 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2668 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2672 base_val = tp->pci_clock_ctrl;
2673 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2674 CLOCK_CTRL_TXCLK_DISABLE);
2676 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2677 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2678 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2679 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2680 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2682 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2683 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2684 u32 newbits1, newbits2;
2686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2688 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2689 CLOCK_CTRL_TXCLK_DISABLE |
2691 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2692 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2693 newbits1 = CLOCK_CTRL_625_CORE;
2694 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2696 newbits1 = CLOCK_CTRL_ALTCLK;
2697 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2700 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2703 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2706 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2711 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2712 CLOCK_CTRL_TXCLK_DISABLE |
2713 CLOCK_CTRL_44MHZ_CORE);
2715 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2718 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2719 tp->pci_clock_ctrl | newbits3, 40);
2723 if (!(device_should_wake) &&
2724 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2725 tg3_power_down_phy(tp, do_low_power);
2727 tg3_frob_aux_power(tp);
2729 /* Workaround for unstable PLL clock */
2730 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2731 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2732 u32 val = tr32(0x7d00);
2734 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2735 tw32(0x7d00, val);
2736 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2737 int err;
2739 err = tg3_nvram_lock(tp);
2740 tg3_halt_cpu(tp, RX_CPU_BASE);
2741 if (!err)
2742 tg3_nvram_unlock(tp);
2743 }
2744 }
2746 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2748 if (device_should_wake)
2749 pci_enable_wake(tp->pdev, state, true);
2751 /* Finally, set the new power state. */
2752 pci_set_power_state(tp->pdev, state);
2754 return 0;
2755 }
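/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_xxx and
 * DUPLEX_xxx values; unknown encodings map to the INVALID values.
 */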
2757 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2759 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2760 case MII_TG3_AUX_STAT_10HALF:
2761 *speed = SPEED_10;
2762 *duplex = DUPLEX_HALF;
2763 break;
2765 case MII_TG3_AUX_STAT_10FULL:
2766 *speed = SPEED_10;
2767 *duplex = DUPLEX_FULL;
2768 break;
2770 case MII_TG3_AUX_STAT_100HALF:
2771 *speed = SPEED_100;
2772 *duplex = DUPLEX_HALF;
2773 break;
2775 case MII_TG3_AUX_STAT_100FULL:
2776 *speed = SPEED_100;
2777 *duplex = DUPLEX_FULL;
2778 break;
2780 case MII_TG3_AUX_STAT_1000HALF:
2781 *speed = SPEED_1000;
2782 *duplex = DUPLEX_HALF;
2783 break;
2785 case MII_TG3_AUX_STAT_1000FULL:
2786 *speed = SPEED_1000;
2787 *duplex = DUPLEX_FULL;
2788 break;
2790 default:
2791 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2792 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2793 SPEED_10;
2794 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2795 DUPLEX_HALF;
2796 break;
2797 }
2798 *speed = SPEED_INVALID;
2799 *duplex = DUPLEX_INVALID;
2800 break;
2801 }
2802 }
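/* Program the copper PHY advertisement registers and restart
 * autonegotiation, or force the configured speed/duplex when autoneg
 * is disabled.
 */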
2804 static void tg3_phy_copper_begin(struct tg3 *tp)
2809 if (tp->link_config.phy_is_low_power) {
2810 /* Entering low power mode. Disable gigabit and
2811 * 100baseT advertisements.
2812 */
2813 tg3_writephy(tp, MII_TG3_CTRL, 0);
2815 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2816 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2817 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2818 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2820 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2821 } else if (tp->link_config.speed == SPEED_INVALID) {
2822 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2823 tp->link_config.advertising &=
2824 ~(ADVERTISED_1000baseT_Half |
2825 ADVERTISED_1000baseT_Full);
2827 new_adv = ADVERTISE_CSMA;
2828 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2829 new_adv |= ADVERTISE_10HALF;
2830 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2831 new_adv |= ADVERTISE_10FULL;
2832 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2833 new_adv |= ADVERTISE_100HALF;
2834 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2835 new_adv |= ADVERTISE_100FULL;
2837 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2839 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2841 if (tp->link_config.advertising &
2842 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2844 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2845 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2846 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2847 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2848 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2849 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2850 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2851 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2852 MII_TG3_CTRL_ENABLE_AS_MASTER);
2853 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2855 tg3_writephy(tp, MII_TG3_CTRL, 0);
2858 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2859 new_adv |= ADVERTISE_CSMA;
2861 /* Asking for a specific link mode. */
2862 if (tp->link_config.speed == SPEED_1000) {
2863 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2865 if (tp->link_config.duplex == DUPLEX_FULL)
2866 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2868 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2869 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2870 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2871 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2872 MII_TG3_CTRL_ENABLE_AS_MASTER);
2874 if (tp->link_config.speed == SPEED_100) {
2875 if (tp->link_config.duplex == DUPLEX_FULL)
2876 new_adv |= ADVERTISE_100FULL;
2878 new_adv |= ADVERTISE_100HALF;
2880 if (tp->link_config.duplex == DUPLEX_FULL)
2881 new_adv |= ADVERTISE_10FULL;
2883 new_adv |= ADVERTISE_10HALF;
2885 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2890 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2893 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2894 tp->link_config.speed != SPEED_INVALID) {
2895 u32 bmcr, orig_bmcr;
2897 tp->link_config.active_speed = tp->link_config.speed;
2898 tp->link_config.active_duplex = tp->link_config.duplex;
2901 switch (tp->link_config.speed) {
2907 bmcr |= BMCR_SPEED100;
2911 bmcr |= TG3_BMCR_SPEED1000;
2915 if (tp->link_config.duplex == DUPLEX_FULL)
2916 bmcr |= BMCR_FULLDPLX;
2918 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2919 (bmcr != orig_bmcr)) {
2920 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2921 for (i = 0; i < 1500; i++) {
2925 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2926 tg3_readphy(tp, MII_BMSR, &tmp))
2928 if (!(tmp & BMSR_LSTATUS)) {
2933 tg3_writephy(tp, MII_BMCR, bmcr);
2937 tg3_writephy(tp, MII_BMCR,
2938 BMCR_ANENABLE | BMCR_ANRESTART);
2939 }
2940 }
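/* One-time DSP register setup required by the BCM5401 PHY. */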
2942 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2946 /* Turn off tap power management. */
2947 /* Set Extended packet length bit */
2948 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2950 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2951 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2953 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2954 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2956 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2957 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2959 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2960 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2962 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2963 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2965 udelay(40);
2967 return err;
2968 }
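/* Check whether the PHY advertisement registers already contain every
 * mode requested in 'mask'; returns 0 if a renegotiation is needed.
 */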
2970 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2972 u32 adv_reg, all_mask = 0;
2974 if (mask & ADVERTISED_10baseT_Half)
2975 all_mask |= ADVERTISE_10HALF;
2976 if (mask & ADVERTISED_10baseT_Full)
2977 all_mask |= ADVERTISE_10FULL;
2978 if (mask & ADVERTISED_100baseT_Half)
2979 all_mask |= ADVERTISE_100HALF;
2980 if (mask & ADVERTISED_100baseT_Full)
2981 all_mask |= ADVERTISE_100FULL;
2983 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2984 return 0;
2986 if ((adv_reg & all_mask) != all_mask)
2987 return 0;
2988 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2989 u32 tg3_ctrl;
2991 all_mask = 0;
2992 if (mask & ADVERTISED_1000baseT_Half)
2993 all_mask |= ADVERTISE_1000HALF;
2994 if (mask & ADVERTISED_1000baseT_Full)
2995 all_mask |= ADVERTISE_1000FULL;
2997 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2998 return 0;
3000 if ((tg3_ctrl & all_mask) != all_mask)
3001 return 0;
3002 }
3003 return 1;
3004 }
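/* Verify that the advertised flow control matches the requested
 * configuration, rewriting the advertisement for the next negotiation
 * cycle when it does not.
 */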
3006 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3007 {
3008 u32 curadv, reqadv;
3010 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3011 return 1;
3013 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3014 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3016 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3017 if (curadv != reqadv)
3018 return 0;
3020 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3021 tg3_readphy(tp, MII_LPA, rmtadv);
3023 /* Reprogram the advertisement register, even if it
3024 * does not affect the current link. If the link
3025 * gets renegotiated in the future, we can save an
3026 * additional renegotiation cycle by advertising
3027 * it correctly in the first place.
3029 if (curadv != reqadv) {
3030 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3031 ADVERTISE_PAUSE_ASYM);
3032 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3033 }
3034 }
3036 return 1;
3037 }
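/* Bring up (or re-validate) the link on copper PHYs: apply the per-chip
 * workarounds, poll for link, derive speed/duplex/flow control and
 * program MAC_MODE to match.
 */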
3039 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3041 int current_link_up;
3043 u32 lcl_adv, rmt_adv;
3051 (MAC_STATUS_SYNC_CHANGED |
3052 MAC_STATUS_CFG_CHANGED |
3053 MAC_STATUS_MI_COMPLETION |
3054 MAC_STATUS_LNKSTATE_CHANGED));
3057 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3059 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3063 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3065 /* Some third-party PHYs need to be reset on link going
3066 * down.
3067 */
3068 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3071 netif_carrier_ok(tp->dev)) {
3072 tg3_readphy(tp, MII_BMSR, &bmsr);
3073 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3074 !(bmsr & BMSR_LSTATUS))
3075 force_reset = 1;
3076 }
3077 if (force_reset)
3078 tg3_phy_reset(tp);
3080 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3081 tg3_readphy(tp, MII_BMSR, &bmsr);
3082 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3083 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3086 if (!(bmsr & BMSR_LSTATUS)) {
3087 err = tg3_init_5401phy_dsp(tp);
3088 if (err)
3089 return err;
3091 tg3_readphy(tp, MII_BMSR, &bmsr);
3092 for (i = 0; i < 1000; i++) {
3093 udelay(10);
3094 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3095 (bmsr & BMSR_LSTATUS)) {
3096 udelay(40);
3097 break;
3098 }
3099 }
3101 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3102 TG3_PHY_REV_BCM5401_B0 &&
3103 !(bmsr & BMSR_LSTATUS) &&
3104 tp->link_config.active_speed == SPEED_1000) {
3105 err = tg3_phy_reset(tp);
3106 if (!err)
3107 err = tg3_init_5401phy_dsp(tp);
3108 if (err)
3109 return err;
3110 }
3111 }
3112 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3113 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3114 /* 5701 {A0,B0} CRC bug workaround */
3115 tg3_writephy(tp, 0x15, 0x0a75);
3116 tg3_writephy(tp, 0x1c, 0x8c68);
3117 tg3_writephy(tp, 0x1c, 0x8d68);
3118 tg3_writephy(tp, 0x1c, 0x8c68);
3121 /* Clear pending interrupts... */
3122 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3123 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3125 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3126 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3127 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3128 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3132 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3133 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3134 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3136 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3139 current_link_up = 0;
3140 current_speed = SPEED_INVALID;
3141 current_duplex = DUPLEX_INVALID;
3143 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3146 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3147 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3148 if (!(val & (1 << 10))) {
3150 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3156 for (i = 0; i < 100; i++) {
3157 tg3_readphy(tp, MII_BMSR, &bmsr);
3158 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3159 (bmsr & BMSR_LSTATUS))
3164 if (bmsr & BMSR_LSTATUS) {
3167 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3168 for (i = 0; i < 2000; i++) {
3170 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3175 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3180 for (i = 0; i < 200; i++) {
3181 tg3_readphy(tp, MII_BMCR, &bmcr);
3182 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3183 continue;
3184 if (bmcr && bmcr != 0x7fff)
3185 break;
3186 udelay(10);
3187 }
3188 }
3190 lcl_adv = 0;
3191 rmt_adv = 0;
3192 tp->link_config.active_speed = current_speed;
3193 tp->link_config.active_duplex = current_duplex;
3195 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3196 if ((bmcr & BMCR_ANENABLE) &&
3197 tg3_copper_is_advertising_all(tp,
3198 tp->link_config.advertising)) {
3199 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3200 &rmt_adv))
3201 current_link_up = 1;
3202 }
3203 } else {
3204 if (!(bmcr & BMCR_ANENABLE) &&
3205 tp->link_config.speed == current_speed &&
3206 tp->link_config.duplex == current_duplex &&
3207 tp->link_config.flowctrl ==
3208 tp->link_config.active_flowctrl) {
3209 current_link_up = 1;
3210 }
3211 }
3213 if (current_link_up == 1 &&
3214 tp->link_config.active_duplex == DUPLEX_FULL)
3215 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3219 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3222 tg3_phy_copper_begin(tp);
3224 tg3_readphy(tp, MII_BMSR, &tmp);
3225 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3226 (tmp & BMSR_LSTATUS))
3227 current_link_up = 1;
3230 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3231 if (current_link_up == 1) {
3232 if (tp->link_config.active_speed == SPEED_100 ||
3233 tp->link_config.active_speed == SPEED_10)
3234 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3236 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3237 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3238 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3240 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3242 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3243 if (tp->link_config.active_duplex == DUPLEX_HALF)
3244 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3247 if (current_link_up == 1 &&
3248 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3249 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3251 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3254 /* ??? Without this setting Netgear GA302T PHY does not
3255 * ??? send/receive packets...
3256 */
3257 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3258 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3259 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3260 tw32_f(MAC_MI_MODE, tp->mi_mode);
3261 udelay(80);
3262 }
3264 tw32_f(MAC_MODE, tp->mac_mode);
3265 udelay(40);
3267 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3268 /* Polled via timer. */
3269 tw32_f(MAC_EVENT, 0);
3271 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3276 current_link_up == 1 &&
3277 tp->link_config.active_speed == SPEED_1000 &&
3278 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3279 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3282 (MAC_STATUS_SYNC_CHANGED |
3283 MAC_STATUS_CFG_CHANGED));
3286 NIC_SRAM_FIRMWARE_MBOX,
3287 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3290 /* Prevent send BD corruption. */
3291 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3292 u16 oldlnkctl, newlnkctl;
3294 pci_read_config_word(tp->pdev,
3295 tp->pcie_cap + PCI_EXP_LNKCTL,
3297 if (tp->link_config.active_speed == SPEED_100 ||
3298 tp->link_config.active_speed == SPEED_10)
3299 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3301 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3302 if (newlnkctl != oldlnkctl)
3303 pci_write_config_word(tp->pdev,
3304 tp->pcie_cap + PCI_EXP_LNKCTL,
3308 if (current_link_up != netif_carrier_ok(tp->dev)) {
3309 if (current_link_up)
3310 netif_carrier_on(tp->dev);
3311 else
3312 netif_carrier_off(tp->dev);
3313 tg3_link_report(tp);
3314 }
3316 return 0;
3317 }
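/* Software state machine for 802.3z (1000BASE-X) autonegotiation on
 * fiber links, used when the SG-DIG hardware autoneg block is absent.
 */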
3319 struct tg3_fiber_aneginfo {
3321 #define ANEG_STATE_UNKNOWN 0
3322 #define ANEG_STATE_AN_ENABLE 1
3323 #define ANEG_STATE_RESTART_INIT 2
3324 #define ANEG_STATE_RESTART 3
3325 #define ANEG_STATE_DISABLE_LINK_OK 4
3326 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3327 #define ANEG_STATE_ABILITY_DETECT 6
3328 #define ANEG_STATE_ACK_DETECT_INIT 7
3329 #define ANEG_STATE_ACK_DETECT 8
3330 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3331 #define ANEG_STATE_COMPLETE_ACK 10
3332 #define ANEG_STATE_IDLE_DETECT_INIT 11
3333 #define ANEG_STATE_IDLE_DETECT 12
3334 #define ANEG_STATE_LINK_OK 13
3335 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3336 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3339 #define MR_AN_ENABLE 0x00000001
3340 #define MR_RESTART_AN 0x00000002
3341 #define MR_AN_COMPLETE 0x00000004
3342 #define MR_PAGE_RX 0x00000008
3343 #define MR_NP_LOADED 0x00000010
3344 #define MR_TOGGLE_TX 0x00000020
3345 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3346 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3347 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3348 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3349 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3350 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3351 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3352 #define MR_TOGGLE_RX 0x00002000
3353 #define MR_NP_RX 0x00004000
3355 #define MR_LINK_OK 0x80000000
3357 unsigned long link_time, cur_time;
3359 u32 ability_match_cfg;
3360 int ability_match_count;
3362 char ability_match, idle_match, ack_match;
3364 u32 txconfig, rxconfig;
3365 #define ANEG_CFG_NP 0x00000080
3366 #define ANEG_CFG_ACK 0x00000040
3367 #define ANEG_CFG_RF2 0x00000020
3368 #define ANEG_CFG_RF1 0x00000010
3369 #define ANEG_CFG_PS2 0x00000001
3370 #define ANEG_CFG_PS1 0x00008000
3371 #define ANEG_CFG_HD 0x00004000
3372 #define ANEG_CFG_FD 0x00002000
3373 #define ANEG_CFG_INVAL 0x00001f06
3378 #define ANEG_TIMER_ENAB 2
3379 #define ANEG_FAILED -1
3381 #define ANEG_STATE_SETTLE_TIME 10000
3383 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3384 struct tg3_fiber_aneginfo *ap)
3387 unsigned long delta;
3391 if (ap->state == ANEG_STATE_UNKNOWN) {
3395 ap->ability_match_cfg = 0;
3396 ap->ability_match_count = 0;
3397 ap->ability_match = 0;
3403 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3404 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3406 if (rx_cfg_reg != ap->ability_match_cfg) {
3407 ap->ability_match_cfg = rx_cfg_reg;
3408 ap->ability_match = 0;
3409 ap->ability_match_count = 0;
3411 if (++ap->ability_match_count > 1) {
3412 ap->ability_match = 1;
3413 ap->ability_match_cfg = rx_cfg_reg;
3416 if (rx_cfg_reg & ANEG_CFG_ACK)
3424 ap->ability_match_cfg = 0;
3425 ap->ability_match_count = 0;
3426 ap->ability_match = 0;
3432 ap->rxconfig = rx_cfg_reg;
3436 case ANEG_STATE_UNKNOWN:
3437 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3438 ap->state = ANEG_STATE_AN_ENABLE;
3441 case ANEG_STATE_AN_ENABLE:
3442 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3443 if (ap->flags & MR_AN_ENABLE) {
3446 ap->ability_match_cfg = 0;
3447 ap->ability_match_count = 0;
3448 ap->ability_match = 0;
3452 ap->state = ANEG_STATE_RESTART_INIT;
3454 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3458 case ANEG_STATE_RESTART_INIT:
3459 ap->link_time = ap->cur_time;
3460 ap->flags &= ~(MR_NP_LOADED);
3462 tw32(MAC_TX_AUTO_NEG, 0);
3463 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3464 tw32_f(MAC_MODE, tp->mac_mode);
3467 ret = ANEG_TIMER_ENAB;
3468 ap->state = ANEG_STATE_RESTART;
3471 case ANEG_STATE_RESTART:
3472 delta = ap->cur_time - ap->link_time;
3473 if (delta > ANEG_STATE_SETTLE_TIME) {
3474 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3476 ret = ANEG_TIMER_ENAB;
3480 case ANEG_STATE_DISABLE_LINK_OK:
3484 case ANEG_STATE_ABILITY_DETECT_INIT:
3485 ap->flags &= ~(MR_TOGGLE_TX);
3486 ap->txconfig = ANEG_CFG_FD;
3487 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3488 if (flowctrl & ADVERTISE_1000XPAUSE)
3489 ap->txconfig |= ANEG_CFG_PS1;
3490 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3491 ap->txconfig |= ANEG_CFG_PS2;
3492 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3493 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3494 tw32_f(MAC_MODE, tp->mac_mode);
3497 ap->state = ANEG_STATE_ABILITY_DETECT;
3500 case ANEG_STATE_ABILITY_DETECT:
3501 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3502 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3506 case ANEG_STATE_ACK_DETECT_INIT:
3507 ap->txconfig |= ANEG_CFG_ACK;
3508 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3509 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3510 tw32_f(MAC_MODE, tp->mac_mode);
3513 ap->state = ANEG_STATE_ACK_DETECT;
3516 case ANEG_STATE_ACK_DETECT:
3517 if (ap->ack_match != 0) {
3518 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3519 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3520 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3522 ap->state = ANEG_STATE_AN_ENABLE;
3524 } else if (ap->ability_match != 0 &&
3525 ap->rxconfig == 0) {
3526 ap->state = ANEG_STATE_AN_ENABLE;
3530 case ANEG_STATE_COMPLETE_ACK_INIT:
3531 if (ap->rxconfig & ANEG_CFG_INVAL) {
3535 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3536 MR_LP_ADV_HALF_DUPLEX |
3537 MR_LP_ADV_SYM_PAUSE |
3538 MR_LP_ADV_ASYM_PAUSE |
3539 MR_LP_ADV_REMOTE_FAULT1 |
3540 MR_LP_ADV_REMOTE_FAULT2 |
3541 MR_LP_ADV_NEXT_PAGE |
3544 if (ap->rxconfig & ANEG_CFG_FD)
3545 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3546 if (ap->rxconfig & ANEG_CFG_HD)
3547 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3548 if (ap->rxconfig & ANEG_CFG_PS1)
3549 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3550 if (ap->rxconfig & ANEG_CFG_PS2)
3551 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3552 if (ap->rxconfig & ANEG_CFG_RF1)
3553 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3554 if (ap->rxconfig & ANEG_CFG_RF2)
3555 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3556 if (ap->rxconfig & ANEG_CFG_NP)
3557 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3559 ap->link_time = ap->cur_time;
3561 ap->flags ^= (MR_TOGGLE_TX);
3562 if (ap->rxconfig & 0x0008)
3563 ap->flags |= MR_TOGGLE_RX;
3564 if (ap->rxconfig & ANEG_CFG_NP)
3565 ap->flags |= MR_NP_RX;
3566 ap->flags |= MR_PAGE_RX;
3568 ap->state = ANEG_STATE_COMPLETE_ACK;
3569 ret = ANEG_TIMER_ENAB;
3572 case ANEG_STATE_COMPLETE_ACK:
3573 if (ap->ability_match != 0 &&
3574 ap->rxconfig == 0) {
3575 ap->state = ANEG_STATE_AN_ENABLE;
3578 delta = ap->cur_time - ap->link_time;
3579 if (delta > ANEG_STATE_SETTLE_TIME) {
3580 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3581 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3583 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3584 !(ap->flags & MR_NP_RX)) {
3585 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3593 case ANEG_STATE_IDLE_DETECT_INIT:
3594 ap->link_time = ap->cur_time;
3595 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3596 tw32_f(MAC_MODE, tp->mac_mode);
3599 ap->state = ANEG_STATE_IDLE_DETECT;
3600 ret = ANEG_TIMER_ENAB;
3603 case ANEG_STATE_IDLE_DETECT:
3604 if (ap->ability_match != 0 &&
3605 ap->rxconfig == 0) {
3606 ap->state = ANEG_STATE_AN_ENABLE;
3609 delta = ap->cur_time - ap->link_time;
3610 if (delta > ANEG_STATE_SETTLE_TIME) {
3611 /* XXX another gem from the Broadcom driver :( */
3612 ap->state = ANEG_STATE_LINK_OK;
3616 case ANEG_STATE_LINK_OK:
3617 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3621 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3622 /* ??? unimplemented */
3623 break;
3625 case ANEG_STATE_NEXT_PAGE_WAIT:
3626 /* ??? unimplemented */
3627 break;
3629 default:
3630 ret = ANEG_FAILED;
3631 break;
3632 }
3634 return ret;
3635 }
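/* Run the fiber autoneg state machine to completion and return the
 * negotiated tx/rx config words; a nonzero return means the link was
 * negotiated successfully.
 */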
3637 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3638 {
3639 int res = 0;
3640 struct tg3_fiber_aneginfo aninfo;
3641 int status = ANEG_FAILED;
3642 unsigned int tick;
3643 u32 tmp;
3645 tw32_f(MAC_TX_AUTO_NEG, 0);
3647 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3648 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3649 udelay(40);
3651 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3652 udelay(40);
3654 memset(&aninfo, 0, sizeof(aninfo));
3655 aninfo.flags |= MR_AN_ENABLE;
3656 aninfo.state = ANEG_STATE_UNKNOWN;
3657 aninfo.cur_time = 0;
3658 tick = 0;
3659 while (++tick < 195000) {
3660 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3661 if (status == ANEG_DONE || status == ANEG_FAILED)
3662 break;
3664 udelay(1);
3665 }
3667 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3668 tw32_f(MAC_MODE, tp->mac_mode);
3669 udelay(40);
3671 *txflags = aninfo.txconfig;
3672 *rxflags = aninfo.flags;
3674 if (status == ANEG_DONE &&
3675 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3676 MR_LP_ADV_FULL_DUPLEX)))
3677 res = 1;
3679 return res;
3680 }
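/* Board-specific bring-up sequence for the BCM8002 SerDes PHY. */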
3682 static void tg3_init_bcm8002(struct tg3 *tp)
3684 u32 mac_status = tr32(MAC_STATUS);
3687 /* Reset when initting first time or we have a link. */
3688 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3689 !(mac_status & MAC_STATUS_PCS_SYNCED))
3690 return;
3692 /* Set PLL lock range. */
3693 tg3_writephy(tp, 0x16, 0x8007);
3696 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3698 /* Wait for reset to complete. */
3699 /* XXX schedule_timeout() ... */
3700 for (i = 0; i < 500; i++)
3703 /* Config mode; select PMA/Ch 1 regs. */
3704 tg3_writephy(tp, 0x10, 0x8411);
3706 /* Enable auto-lock and comdet, select txclk for tx. */
3707 tg3_writephy(tp, 0x11, 0x0a10);
3709 tg3_writephy(tp, 0x18, 0x00a0);
3710 tg3_writephy(tp, 0x16, 0x41ff);
3712 /* Assert and deassert POR. */
3713 tg3_writephy(tp, 0x13, 0x0400);
3715 tg3_writephy(tp, 0x13, 0x0000);
3717 tg3_writephy(tp, 0x11, 0x0a50);
3719 tg3_writephy(tp, 0x11, 0x0a10);
3721 /* Wait for signal to stabilize */
3722 /* XXX schedule_timeout() ... */
3723 for (i = 0; i < 15000; i++)
3726 /* Deselect the channel register so we can read the PHYID
3727 * later.
3728 */
3729 tg3_writephy(tp, 0x10, 0x8011);
3730 }
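/* Fiber link setup using the on-chip SG-DIG hardware autoneg engine;
 * falls back to parallel detection when the partner sends no config
 * code words.  Returns nonzero when the link is up.
 */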
3732 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3735 u32 sg_dig_ctrl, sg_dig_status;
3736 u32 serdes_cfg, expected_sg_dig_ctrl;
3737 int workaround, port_a;
3738 int current_link_up;
3741 expected_sg_dig_ctrl = 0;
3744 current_link_up = 0;
3746 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3747 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3749 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3752 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3753 /* preserve bits 20-23 for voltage regulator */
3754 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3757 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3759 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3760 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3762 u32 val = serdes_cfg;
3768 tw32_f(MAC_SERDES_CFG, val);
3771 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3773 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3774 tg3_setup_flow_control(tp, 0, 0);
3775 current_link_up = 1;
3780 /* Want auto-negotiation. */
3781 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3783 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3784 if (flowctrl & ADVERTISE_1000XPAUSE)
3785 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3786 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3787 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3789 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3790 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3791 tp->serdes_counter &&
3792 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3793 MAC_STATUS_RCVD_CFG)) ==
3794 MAC_STATUS_PCS_SYNCED)) {
3795 tp->serdes_counter--;
3796 current_link_up = 1;
3801 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3802 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3804 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3806 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3807 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3808 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3809 MAC_STATUS_SIGNAL_DET)) {
3810 sg_dig_status = tr32(SG_DIG_STATUS);
3811 mac_status = tr32(MAC_STATUS);
3813 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3814 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3815 u32 local_adv = 0, remote_adv = 0;
3817 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3818 local_adv |= ADVERTISE_1000XPAUSE;
3819 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3820 local_adv |= ADVERTISE_1000XPSE_ASYM;
3822 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3823 remote_adv |= LPA_1000XPAUSE;
3824 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3825 remote_adv |= LPA_1000XPAUSE_ASYM;
3827 tg3_setup_flow_control(tp, local_adv, remote_adv);
3828 current_link_up = 1;
3829 tp->serdes_counter = 0;
3830 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3831 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3832 if (tp->serdes_counter)
3833 tp->serdes_counter--;
3836 u32 val = serdes_cfg;
3843 tw32_f(MAC_SERDES_CFG, val);
3846 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3849 /* Link parallel detection - link is up */
3850 /* only if we have PCS_SYNC and not */
3851 /* receiving config code words */
3852 mac_status = tr32(MAC_STATUS);
3853 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3854 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3855 tg3_setup_flow_control(tp, 0, 0);
3856 current_link_up = 1;
3858 TG3_FLG2_PARALLEL_DETECT;
3859 tp->serdes_counter =
3860 SERDES_PARALLEL_DET_TIMEOUT;
3862 goto restart_autoneg;
3866 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3867 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3871 return current_link_up;
3872 }
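/* Fiber link setup for parts without SG-DIG hardware autoneg: run the
 * software state machine, or simply force a 1000FD link when autoneg
 * is disabled.
 */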
3874 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3876 int current_link_up = 0;
3878 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3881 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3882 u32 txflags, rxflags;
3885 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3886 u32 local_adv = 0, remote_adv = 0;
3888 if (txflags & ANEG_CFG_PS1)
3889 local_adv |= ADVERTISE_1000XPAUSE;
3890 if (txflags & ANEG_CFG_PS2)
3891 local_adv |= ADVERTISE_1000XPSE_ASYM;
3893 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3894 remote_adv |= LPA_1000XPAUSE;
3895 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3896 remote_adv |= LPA_1000XPAUSE_ASYM;
3898 tg3_setup_flow_control(tp, local_adv, remote_adv);
3900 current_link_up = 1;
3902 for (i = 0; i < 30; i++) {
3905 (MAC_STATUS_SYNC_CHANGED |
3906 MAC_STATUS_CFG_CHANGED));
3908 if ((tr32(MAC_STATUS) &
3909 (MAC_STATUS_SYNC_CHANGED |
3910 MAC_STATUS_CFG_CHANGED)) == 0)
3914 mac_status = tr32(MAC_STATUS);
3915 if (current_link_up == 0 &&
3916 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3917 !(mac_status & MAC_STATUS_RCVD_CFG))
3918 current_link_up = 1;
3920 tg3_setup_flow_control(tp, 0, 0);
3922 /* Forcing 1000FD link up. */
3923 current_link_up = 1;
3925 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3928 tw32_f(MAC_MODE, tp->mac_mode);
3933 return current_link_up;
3936 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3939 u16 orig_active_speed;
3940 u8 orig_active_duplex;
3942 int current_link_up;
3945 orig_pause_cfg = tp->link_config.active_flowctrl;
3946 orig_active_speed = tp->link_config.active_speed;
3947 orig_active_duplex = tp->link_config.active_duplex;
3949 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3950 netif_carrier_ok(tp->dev) &&
3951 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3952 mac_status = tr32(MAC_STATUS);
3953 mac_status &= (MAC_STATUS_PCS_SYNCED |
3954 MAC_STATUS_SIGNAL_DET |
3955 MAC_STATUS_CFG_CHANGED |
3956 MAC_STATUS_RCVD_CFG);
3957 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3958 MAC_STATUS_SIGNAL_DET)) {
3959 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3960 MAC_STATUS_CFG_CHANGED));
3965 tw32_f(MAC_TX_AUTO_NEG, 0);
3967 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3968 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3969 tw32_f(MAC_MODE, tp->mac_mode);
3972 if (tp->phy_id == TG3_PHY_ID_BCM8002)
3973 tg3_init_bcm8002(tp);
3975 /* Enable link change event even when serdes polling. */
3976 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3979 current_link_up = 0;
3980 mac_status = tr32(MAC_STATUS);
3982 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3983 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3985 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3987 tp->napi[0].hw_status->status =
3988 (SD_STATUS_UPDATED |
3989 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3991 for (i = 0; i < 100; i++) {
3992 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3993 MAC_STATUS_CFG_CHANGED));
3995 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3996 MAC_STATUS_CFG_CHANGED |
3997 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4001 mac_status = tr32(MAC_STATUS);
4002 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4003 current_link_up = 0;
4004 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4005 tp->serdes_counter == 0) {
4006 tw32_f(MAC_MODE, (tp->mac_mode |
4007 MAC_MODE_SEND_CONFIGS));
4009 tw32_f(MAC_MODE, tp->mac_mode);
4013 if (current_link_up == 1) {
4014 tp->link_config.active_speed = SPEED_1000;
4015 tp->link_config.active_duplex = DUPLEX_FULL;
4016 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4017 LED_CTRL_LNKLED_OVERRIDE |
4018 LED_CTRL_1000MBPS_ON));
4020 tp->link_config.active_speed = SPEED_INVALID;
4021 tp->link_config.active_duplex = DUPLEX_INVALID;
4022 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4023 LED_CTRL_LNKLED_OVERRIDE |
4024 LED_CTRL_TRAFFIC_OVERRIDE));
4027 if (current_link_up != netif_carrier_ok(tp->dev)) {
4028 if (current_link_up)
4029 netif_carrier_on(tp->dev);
4031 netif_carrier_off(tp->dev);
4032 tg3_link_report(tp);
4034 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4035 if (orig_pause_cfg != now_pause_cfg ||
4036 orig_active_speed != tp->link_config.active_speed ||
4037 orig_active_duplex != tp->link_config.active_duplex)
4038 tg3_link_report(tp);
4039 }
4041 return 0;
4042 }
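/* Link setup for serdes devices managed through an MII-style register
 * interface (5714S/5780-class parts).
 */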
4044 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4046 int current_link_up, err = 0;
4050 u32 local_adv, remote_adv;
4052 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4053 tw32_f(MAC_MODE, tp->mac_mode);
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED |
4061 MAC_STATUS_MI_COMPLETION |
4062 MAC_STATUS_LNKSTATE_CHANGED));
4068 current_link_up = 0;
4069 current_speed = SPEED_INVALID;
4070 current_duplex = DUPLEX_INVALID;
4072 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4073 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4074 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4075 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4076 bmsr |= BMSR_LSTATUS;
4078 bmsr &= ~BMSR_LSTATUS;
4081 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4083 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4084 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4085 /* do nothing, just check for link up at the end */
4086 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4089 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4090 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4091 ADVERTISE_1000XPAUSE |
4092 ADVERTISE_1000XPSE_ASYM |
4095 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4097 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4098 new_adv |= ADVERTISE_1000XHALF;
4099 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4100 new_adv |= ADVERTISE_1000XFULL;
4102 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4103 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4104 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4105 tg3_writephy(tp, MII_BMCR, bmcr);
4107 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4108 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4109 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4116 bmcr &= ~BMCR_SPEED1000;
4117 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4119 if (tp->link_config.duplex == DUPLEX_FULL)
4120 new_bmcr |= BMCR_FULLDPLX;
4122 if (new_bmcr != bmcr) {
4123 /* BMCR_SPEED1000 is a reserved bit that needs
4124 * to be set on write.
4126 new_bmcr |= BMCR_SPEED1000;
4128 /* Force a linkdown */
4129 if (netif_carrier_ok(tp->dev)) {
4132 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4133 adv &= ~(ADVERTISE_1000XFULL |
4134 ADVERTISE_1000XHALF |
4136 tg3_writephy(tp, MII_ADVERTISE, adv);
4137 tg3_writephy(tp, MII_BMCR, bmcr |
4141 netif_carrier_off(tp->dev);
4143 tg3_writephy(tp, MII_BMCR, new_bmcr);
4145 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4146 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4147 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4149 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4150 bmsr |= BMSR_LSTATUS;
4152 bmsr &= ~BMSR_LSTATUS;
4154 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4158 if (bmsr & BMSR_LSTATUS) {
4159 current_speed = SPEED_1000;
4160 current_link_up = 1;
4161 if (bmcr & BMCR_FULLDPLX)
4162 current_duplex = DUPLEX_FULL;
4164 current_duplex = DUPLEX_HALF;
4169 if (bmcr & BMCR_ANENABLE) {
4172 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4173 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4174 common = local_adv & remote_adv;
4175 if (common & (ADVERTISE_1000XHALF |
4176 ADVERTISE_1000XFULL)) {
4177 if (common & ADVERTISE_1000XFULL)
4178 current_duplex = DUPLEX_FULL;
4180 current_duplex = DUPLEX_HALF;
4183 current_link_up = 0;
4187 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4188 tg3_setup_flow_control(tp, local_adv, remote_adv);
4190 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4191 if (tp->link_config.active_duplex == DUPLEX_HALF)
4192 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4194 tw32_f(MAC_MODE, tp->mac_mode);
4197 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4199 tp->link_config.active_speed = current_speed;
4200 tp->link_config.active_duplex = current_duplex;
4202 if (current_link_up != netif_carrier_ok(tp->dev)) {
4203 if (current_link_up)
4204 netif_carrier_on(tp->dev);
4206 netif_carrier_off(tp->dev);
4207 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4209 tg3_link_report(tp);
4210 }
4212 return err;
4213 }
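/* Periodically called while autonegotiating on serdes links; forces
 * the link up via parallel detection when the partner sends no config
 * code words, and re-enables autoneg once they reappear.
 */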
4214 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4216 if (tp->serdes_counter) {
4217 /* Give autoneg time to complete. */
4218 tp->serdes_counter--;
4221 if (!netif_carrier_ok(tp->dev) &&
4222 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4225 tg3_readphy(tp, MII_BMCR, &bmcr);
4226 if (bmcr & BMCR_ANENABLE) {
4229 /* Select shadow register 0x1f */
4230 tg3_writephy(tp, 0x1c, 0x7c00);
4231 tg3_readphy(tp, 0x1c, &phy1);
4233 /* Select expansion interrupt status register */
4234 tg3_writephy(tp, 0x17, 0x0f01);
4235 tg3_readphy(tp, 0x15, &phy2);
4236 tg3_readphy(tp, 0x15, &phy2);
4238 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4239 /* We have signal detect and not receiving
4240 * config code words, link is up by parallel
4244 bmcr &= ~BMCR_ANENABLE;
4245 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4246 tg3_writephy(tp, MII_BMCR, bmcr);
4247 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4251 else if (netif_carrier_ok(tp->dev) &&
4252 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4253 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4256 /* Select expansion interrupt status register */
4257 tg3_writephy(tp, 0x17, 0x0f01);
4258 tg3_readphy(tp, 0x15, &phy2);
4262 /* Config code words received, turn on autoneg. */
4263 tg3_readphy(tp, MII_BMCR, &bmcr);
4264 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4266 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4267 }
4268 }
4269 }
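/* Top-level link setup dispatcher.  Picks the fiber, fiber-MII, or
 * copper path and then reprograms the MAC clocks, TX lengths and
 * statistics coalescing to match the new link state.
 */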
4272 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4276 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4277 err = tg3_setup_fiber_phy(tp, force_reset);
4278 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4279 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4281 err = tg3_setup_copper_phy(tp, force_reset);
4284 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4287 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4288 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4290 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4295 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4296 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4297 tw32(GRC_MISC_CFG, val);
4300 if (tp->link_config.active_speed == SPEED_1000 &&
4301 tp->link_config.active_duplex == DUPLEX_HALF)
4302 tw32(MAC_TX_LENGTHS,
4303 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4304 (6 << TX_LENGTHS_IPG_SHIFT) |
4305 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4307 tw32(MAC_TX_LENGTHS,
4308 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4309 (6 << TX_LENGTHS_IPG_SHIFT) |
4310 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4312 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4313 if (netif_carrier_ok(tp->dev)) {
4314 tw32(HOSTCC_STAT_COAL_TICKS,
4315 tp->coal.stats_block_coalesce_usecs);
4317 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4321 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4322 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4323 if (!netif_carrier_ok(tp->dev))
4324 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4327 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4328 tw32(PCIE_PWR_MGMT_THRESH, val);
4334 /* This is called whenever we suspect that the system chipset is re-
4335 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4336 * is bogus tx completions. We try to recover by setting the
4337 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4340 static void tg3_tx_recover(struct tg3 *tp)
4342 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4343 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4345 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4346 "mapped I/O cycles to the network device, attempting to "
4347 "recover. Please report the problem to the driver maintainer "
4348 "and include system chipset information.\n", tp->dev->name);
4350 spin_lock(&tp->lock);
4351 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4352 spin_unlock(&tp->lock);
4353 }
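/* Number of free TX descriptors, derived from the producer and
 * consumer indices modulo the ring size.
 */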
4355 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4356 {
4357 smp_mb();
4358 return tnapi->tx_pending -
4359 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4360 }
4362 /* Tigon3 never reports partial packet sends. So we do not
4363 * need special logic to handle SKBs that have not had all
4364 * of their frags sent yet, like SunGEM does.
4366 static void tg3_tx(struct tg3_napi *tnapi)
4368 struct tg3 *tp = tnapi->tp;
4369 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4370 u32 sw_idx = tnapi->tx_cons;
4371 struct netdev_queue *txq;
4372 int index = tnapi - tp->napi;
4374 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4375 index--;
4377 txq = netdev_get_tx_queue(tp->dev, index);
4379 while (sw_idx != hw_idx) {
4380 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4381 struct sk_buff *skb = ri->skb;
4382 int i, tx_bug = 0;
4384 if (unlikely(skb == NULL)) {
4385 tg3_tx_recover(tp);
4386 return;
4387 }
4389 pci_unmap_single(tp->pdev,
4390 pci_unmap_addr(ri, mapping),
4391 skb_headlen(skb),
4392 PCI_DMA_TODEVICE);
4394 ri->skb = NULL;
4396 sw_idx = NEXT_TX(sw_idx);
4398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4399 ri = &tnapi->tx_buffers[sw_idx];
4400 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4401 tx_bug = 1;
4403 pci_unmap_page(tp->pdev,
4404 pci_unmap_addr(ri, mapping),
4405 skb_shinfo(skb)->frags[i].size,
4406 PCI_DMA_TODEVICE);
4407 sw_idx = NEXT_TX(sw_idx);
4408 }
4410 dev_kfree_skb(skb);
4412 if (unlikely(tx_bug)) {
4413 tg3_tx_recover(tp);
4414 return;
4415 }
4416 }
4418 tnapi->tx_cons = sw_idx;
4420 /* Need to make the tx_cons update visible to tg3_start_xmit()
4421 * before checking for netif_queue_stopped(). Without the
4422 * memory barrier, there is a small possibility that tg3_start_xmit()
4423 * will miss it and cause the queue to be stopped forever.
4427 if (unlikely(netif_tx_queue_stopped(txq) &&
4428 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4429 __netif_tx_lock(txq, smp_processor_id());
4430 if (netif_tx_queue_stopped(txq) &&
4431 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4432 netif_tx_wake_queue(txq);
4433 __netif_tx_unlock(txq);
4434 }
4435 }
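/* Unmap and free a previously posted RX buffer, if any. */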
4437 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4439 if (!ri->skb)
4440 return;
4442 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4443 map_sz, PCI_DMA_FROMDEVICE);
4444 dev_kfree_skb_any(ri->skb);
4445 ri->skb = NULL;
4446 }
4448 /* Returns size of skb allocated or < 0 on error.
4450 * We only need to fill in the address because the other members
4451 * of the RX descriptor are invariant, see tg3_init_rings.
4453 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4454 * posting buffers we only dirty the first cache line of the RX
4455 * descriptor (containing the address). Whereas for the RX status
4456 * buffers the cpu only reads the last cacheline of the RX descriptor
4457 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4458 */
4459 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4460 u32 opaque_key, u32 dest_idx_unmasked)
4462 struct tg3_rx_buffer_desc *desc;
4463 struct ring_info *map, *src_map;
4464 struct sk_buff *skb;
4466 int skb_size, dest_idx;
4469 switch (opaque_key) {
4470 case RXD_OPAQUE_RING_STD:
4471 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4472 desc = &tpr->rx_std[dest_idx];
4473 map = &tpr->rx_std_buffers[dest_idx];
4474 skb_size = tp->rx_pkt_map_sz;
4475 break;
4477 case RXD_OPAQUE_RING_JUMBO:
4478 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4479 desc = &tpr->rx_jmb[dest_idx].std;
4480 map = &tpr->rx_jmb_buffers[dest_idx];
4481 skb_size = TG3_RX_JMB_MAP_SZ;
4482 break;
4484 default:
4485 return -EINVAL;
4486 }
4488 /* Do not overwrite any of the map or rp information
4489 * until we are sure we can commit to a new buffer.
4491 * Callers depend upon this behavior and assume that
4492 * we leave everything unchanged if we fail.
4494 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4495 if (skb == NULL)
4496 return -ENOMEM;
4498 skb_reserve(skb, tp->rx_offset);
4500 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4501 PCI_DMA_FROMDEVICE);
4502 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4503 dev_kfree_skb(skb);
4504 return -EIO;
4505 }
4507 map->skb = skb;
4508 pci_unmap_addr_set(map, mapping, mapping);
4510 desc->addr_hi = ((u64)mapping >> 32);
4511 desc->addr_lo = ((u64)mapping & 0xffffffff);
4513 return skb_size;
4514 }
4516 /* We only need to move over in the address because the other
4517 * members of the RX descriptor are invariant. See notes above
4518 * tg3_alloc_rx_skb for full details.
4519 */
4520 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4521 struct tg3_rx_prodring_set *dpr,
4522 u32 opaque_key, int src_idx,
4523 u32 dest_idx_unmasked)
4525 struct tg3 *tp = tnapi->tp;
4526 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4527 struct ring_info *src_map, *dest_map;
4529 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4531 switch (opaque_key) {
4532 case RXD_OPAQUE_RING_STD:
4533 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4534 dest_desc = &dpr->rx_std[dest_idx];
4535 dest_map = &dpr->rx_std_buffers[dest_idx];
4536 src_desc = &spr->rx_std[src_idx];
4537 src_map = &spr->rx_std_buffers[src_idx];
4538 break;
4540 case RXD_OPAQUE_RING_JUMBO:
4541 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4542 dest_desc = &dpr->rx_jmb[dest_idx].std;
4543 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4544 src_desc = &spr->rx_jmb[src_idx].std;
4545 src_map = &spr->rx_jmb_buffers[src_idx];
4546 break;
4548 default:
4549 return;
4550 }
4552 dest_map->skb = src_map->skb;
4553 pci_unmap_addr_set(dest_map, mapping,
4554 pci_unmap_addr(src_map, mapping));
4555 dest_desc->addr_hi = src_desc->addr_hi;
4556 dest_desc->addr_lo = src_desc->addr_lo;
4558 /* Ensure that the update to the skb happens after the physical
4559 * addresses have been transferred to the new BD location.
4563 src_map->skb = NULL;
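/* The smp_wmb() above pairs with the smp_rmb() after the skb checks in
 * tg3_rx_prodring_xfer() below: the descriptor address words are made
 * visible before the skb ownership changes hands, so once the transfer
 * code has sampled the skb pointers it can safely copy the ring_info
 * and descriptor contents.
 */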
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * fields, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
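/* For example (MAXLEN values are illustrative, not chip defaults): with
 * the standard ring's TG3_BDINFO MAXLEN covering frames up to ~1.5K and
 * the jumbo ring's covering up to ~9K, a 300-byte frame is serviced from
 * the first (standard) entry, while a 4000-byte frame fails the standard
 * MAXLEN check and is serviced from a jumbo ring buffer instead.
 */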
4590 static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask = 0, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received = 0;
	struct tg3_rx_prodring_set *tpr = tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();

	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
4611 struct ring_info *ri;
4612 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
4615 dma_addr_t dma_addr;
4616 u32 opaque_key, desc_idx, *post_ptr;
4618 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4619 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;
4634 work_mask |= opaque_key;
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;
		if (len > RX_COPY_THRESHOLD &&
		    tp->rx_offset == NET_IP_ALIGN) {
			/* rx_offset will likely not equal NET_IP_ALIGN
			 * if this is a 5701 card running in PCI-X mode
			 * [see tg3_get_invariants()]
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();
			ri->skb = NULL;
			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev,
						    len + TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;
4685 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4686 skb_put(copy_skb, len);
4687 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4688 skb_copy_from_linear_data(skb, copy_skb->data, len);
4689 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			vlan_gro_receive(&tnapi->napi, tp->vlgrp,
					 desc->err_vlan & RXD_VLAN_MASK, skb);
		} else
#endif
			napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}
4744 /* ACK the status ring. */
4745 tnapi->rx_rcb_ptr = sw_idx;
4746 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4748 /* Refill RX ring(s). */
4749 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4750 if (work_mask & RXD_OPAQUE_RING_STD) {
4751 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4752 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4753 tpr->rx_std_prod_idx);
4755 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4756 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4757 TG3_RX_JUMBO_RING_SIZE;
4758 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4759 tpr->rx_jmb_prod_idx);
4762 } else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4769 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
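/* The loop above implements a copybreak scheme: frames larger than
 * RX_COPY_THRESHOLD keep their DMA buffer and a fresh one is posted via
 * tg3_alloc_rx_skb(), while smaller frames are copied into a new skb and
 * the original buffer is recycled with tg3_recycle_rx(), trading one
 * memcpy for cheaper buffer reuse.
 */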
4778 static void tg3_poll_link(struct tg3 *tp)
4780 /* handle link change and other phy events */
4781 if (!(tp->tg3_flags &
4782 (TG3_FLAG_USE_LINKCHG_REG |
4783 TG3_FLAG_POLL_SERDES))) {
4784 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4786 if (sblk->status & SD_STATUS_LINK_CHG) {
4787 sblk->status = SD_STATUS_UPDATED |
4788 (sblk->status & ~SD_STATUS_LINK_CHG);
4789 spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
4804 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4805 struct tg3_rx_prodring_set *dpr,
4806 struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4827 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4829 si = spr->rx_std_cons_idx;
4830 di = dpr->rx_std_prod_idx;
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}
		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;
		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4883 cpycnt = min(cpycnt,
4884 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4886 si = spr->rx_jmb_cons_idx;
4887 di = dpr->rx_jmb_prod_idx;
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_rx() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}
		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}

	return err;
}
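/* A worked example of the copy arithmetic above, using the standard
 * ring: with rx_std_cons_idx == 508 and src_prod_idx == 4 in a 512-entry
 * TG3_RX_RING_SIZE ring (and assuming the destination has room), the
 * first pass takes the else branch, cpycnt = 512 - 508 = 4, and copies
 * entries 508..511; the consumer index then wraps to 0, and the next
 * pass copies entries 0..3 before the loop sees cons == prod and breaks.
 */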
4927 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4929 struct tg3 *tp = tnapi->tp;
	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}
	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
4942 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4943 work_done += tg3_rx(tnapi, budget - work_done);
4945 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
4949 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4951 for (i = 1; i < tp->irq_cnt; i++)
4952 err |= tg3_rx_prodring_xfer(tp, dpr,
						    tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
4958 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4959 dpr->rx_std_prod_idx);
		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
4974 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4976 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;
4990 /* tp->last_tag is used in tg3_restart_ints() below
4991 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();
		/* check for RX/TX work to do */
		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			break;
		}
	}

	return work_done;

tx_recovery:
5012 /* work_done is guaranteed to be less than budget. */
5013 napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
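/* A note on the interrupt ack above: the status tag is written into the
 * upper byte of the vector's interrupt mailbox (last_tag << 24), which
 * both re-enables the vector and tells the hardware how much work has
 * been processed, so the hardware can re-assert the interrupt if a newer
 * status tag was generated in the meantime.
 */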
5018 static int tg3_poll(struct napi_struct *napi, int budget)
5020 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;
5036 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5037 /* tp->last_tag is used in tg3_int_reenable() below
5038 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
5057 /* work_done is guaranteed to be less than budget. */
5058 napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
 * Most of the time this is not necessary, except when shutting down
 * the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
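/* A typical caller-side sketch, matching the reset path below: pass a
 * non-zero irq_sync only when the hardware is about to be reconfigured
 * and in-flight interrupt handlers must drain first.
 *
 *	tg3_full_lock(tp, 1);		(also quiesces the IRQ handlers)
 *	tg3_halt(tp, ...);
 *	err = tg3_init_hw(tp, ...);
 *	tg3_full_unlock(tp);
 */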
/* One-shot MSI handler - the chip automatically disables the interrupt
 * after sending the MSI, so the driver doesn't have to do it.
 */
5101 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5103 struct tg3_napi *tnapi = dev_id;
5104 struct tg3 *tp = tnapi->tp;
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
5120 static irqreturn_t tg3_msi(int irq, void *dev_id)
5122 struct tg3_napi *tnapi = dev_id;
5123 struct tg3 *tp = tnapi->tp;
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
5135 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5136 if (likely(!tg3_irq_sync(tp)))
5137 napi_schedule(&tnapi->napi);
5139 return IRQ_RETVAL(1);
5142 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5144 struct tg3_napi *tnapi = dev_id;
5145 struct tg3 *tp = tnapi->tp;
5146 struct tg3_hw_status *sblk = tnapi->hw_status;
5147 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * is visible.  Reading the PCI State register will confirm whether
	 * the interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
5173 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
5176 sblk->status &= ~SD_STATUS_UPDATED;
5177 if (likely(tg3_has_work(tnapi))) {
5178 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5179 napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}

out:
	return IRQ_RETVAL(handled);
}
5191 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5193 struct tg3_napi *tnapi = dev_id;
5194 struct tg3 *tp = tnapi->tp;
5195 struct tg3_hw_status *sblk = tnapi->hw_status;
5196 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * is visible.  Reading the PCI State register will confirm whether
	 * the interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
5230 tnapi->last_irq_tag = sblk->status_tag;
	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
5240 return IRQ_RETVAL(handled);
5243 /* ISR for interrupt test */
5244 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5246 struct tg3_napi *tnapi = dev_id;
5247 struct tg3 *tp = tnapi->tp;
5248 struct tg3_hw_status *sblk = tnapi->hw_status;
5250 if ((sblk->status & SD_STATUS_UPDATED) ||
5251 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5252 tg3_disable_ints(tp);
5253 return IRQ_RETVAL(1);
5255 return IRQ_RETVAL(0);
5258 static int tg3_init_hw(struct tg3 *, int);
5259 static int tg3_halt(struct tg3 *, int, int);
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
5264 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5265 __releases(tp->lock)
5266 __acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
5274 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5275 tg3_full_unlock(tp);
5276 del_timer_sync(&tp->timer);
5278 tg3_napi_enable(tp);
		tg3_full_lock(tp, 0);
	}

	return err;
}
5285 #ifdef CONFIG_NET_POLL_CONTROLLER
5286 static void tg3_poll_controller(struct net_device *dev)
	int i;
	struct tg3 *tp = netdev_priv(dev);
5291 for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5296 static void tg3_reset_task(struct work_struct *work)
5298 struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;
5302 tg3_full_lock(tp, 0);
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);
5315 tg3_full_lock(tp, 1);
5317 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5318 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5320 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5321 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5322 tp->write32_rx_mbox = tg3_write_flush_reg32;
5323 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5324 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5327 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
out:
	tg3_full_unlock(tp);
5344 static void tg3_dump_short_state(struct tg3 *tp)
5346 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5347 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5348 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5349 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5352 static void tg3_tx_timeout(struct net_device *dev)
5354 struct tg3 *tp = netdev_priv(dev);
5356 if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}
5362 schedule_work(&tp->reset_task);
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5366 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
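/* Worked example: a buffer mapped at base = 0xffffff00 with len = 0x200
 * gives base + len + 8 = 0x00000108 once the u32 addition wraps, which
 * is < base, so the test fires: the buffer straddles a 4GB boundary.
 */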
5374 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_BIT_MASK(40));
	return 0;
#else
	return 0;
#endif
}
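/* Worked example: DMA_BIT_MASK(40) is 0xffffffffff, so a buffer mapped
 * at 0xfffffff000 with len = 0x2000 ends at 0x10000001000, which exceeds
 * the 40-bit limit and trips the test on affected parts.
 */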
5387 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
/* Work around 4GB and 40-bit hardware DMA bugs. */
5390 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5391 struct sk_buff *skb, u32 last_plus_one,
5392 u32 *start, u32 base_flags, u32 mss)
5394 struct tg3 *tp = tnapi->tp;
5395 struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);
5405 new_skb = skb_copy_expand(skb,
5406 skb_headroom(skb) + more_headroom,
5407 skb_tailroom(skb), GFP_ATOMIC);
	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);
5423 /* Make sure new skb does not cross any 4G boundaries.
5424 * Drop the packet if it does.
5426 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5427 tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
		} else {
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}
5440 /* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
5450 pci_unmap_single(tp->pdev,
5451 pci_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);
	return ret;
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
5474 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5475 int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
5485 vlan_tag |= (mss << TXD_MSS_SHIFT);
5487 txd->addr_hi = ((u64) mapping >> 32);
5488 txd->addr_lo = ((u64) mapping & 0xffffffff);
5489 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
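/* Worked example: for the final fragment of a 1514-byte frame with no
 * VLAN tag and no TSO, the caller passes mss_and_is_end == 1, so
 * is_end = 1 and mss = 0, and the descriptor is filled with
 *
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END | <caller flags>
 *	vlan_tag  = 0
 */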
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 */
5496 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5497 struct net_device *dev)
5499 struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	dma_addr_t mapping;
5502 struct tg3_napi *tnapi;
5503 struct netdev_queue *txq;
5504 unsigned int i, last;
5507 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5508 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;
5512 /* We are running in BH disabled context with netif_tx_lock
5513 * and TX reclaim runs via tp->napi.poll inside of a software
5514 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
5517 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5518 if (!netif_tx_queue_stopped(txq)) {
5519 netif_tx_stop_queue(txq);
5521 /* This is a hard error, log it. */
5522 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5523 "queue awake!\n", dev->name);
5525 return NETDEV_TX_BUSY;
	entry = tnapi->tx_prod;
	base_flags = 0;
5531 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;
		u32 hdrlen;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}
5541 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		else {
			struct iphdr *iph = ip_hdr(skb);
5546 tcp_opt_len = tcp_optlen(skb);
5547 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}
5554 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5555 mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;
5562 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5563 TXD_FLAG_CPU_POST_DMA);
		tcp_hdr(skb)->check = 0;
	}
5568 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5569 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5570 #if TG3_VLAN_TAG_USED
5571 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5572 base_flags |= (TXD_FLAG_VLAN |
				(vlan_tx_tag_get(skb) << 16));
#endif
5576 len = skb_headlen(skb);
5578 /* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}
5585 tnapi->tx_buffers[entry].skb = skb;
5586 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5588 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5589 !mss && skb->len > ETH_DATA_LEN)
5590 base_flags |= TXD_FLAG_JMB_PKT;
5592 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5593 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5595 entry = NEXT_TX(entry);
5597 /* Now loop through additional data fragments, and queue them. */
5598 if (skb_shinfo(skb)->nr_frags > 0) {
5599 last = skb_shinfo(skb)->nr_frags - 1;
5600 for (i = 0; i <= last; i++) {
5601 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;
5611 tnapi->tx_buffers[entry].skb = NULL;
5612 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5615 tg3_set_txd(tnapi, entry, mapping, len,
5616 base_flags, (i == last) | (mss << 1));
5618 entry = NEXT_TX(entry);
5622 /* Packets are ready, update Tx producer idx local and on card. */
5623 tw32_tx_mbox(tnapi->prodmbox, entry);
5625 tnapi->tx_prod = entry;
5626 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5627 netif_tx_stop_queue(txq);
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;
dma_error:
	entry = tnapi->tx_prod;
5640 tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
5645 for (i = 0; i <= last; i++) {
5646 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5647 entry = NEXT_TX(entry);
5649 pci_unmap_page(tp->pdev,
5650 pci_unmap_addr(&tnapi->tx_buffers[entry],
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
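/* On a fragment mapping failure, the dma_error path above rewinds from
 * tnapi->tx_prod: the linear head is unmapped first, then the fragments
 * that made it into the ring, and the skb is freed while still returning
 * NETDEV_TX_OK so the stack does not retry a packet that cannot be
 * mapped.
 */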
5659 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5660 struct net_device *);
/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
5665 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5667 struct sk_buff *segs, *nskb;
5668 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5670 /* Estimate the number of fragments in the worst case */
5671 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5672 netif_stop_queue(tp->dev);
5673 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5674 return NETDEV_TX_BUSY;
		netif_wake_queue(tp->dev);
	}
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
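/* The frag_cnt_est heuristic above reserves gso_segs * 3 descriptors,
 * presumably budgeting a descriptor for each segment's rebuilt header
 * plus a couple for its payload pieces; it only needs to be a safe
 * over-estimate, since tg3_start_xmit_dma_bug() still performs the exact
 * availability check for every segment it queues.
 */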
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
5699 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5700 struct net_device *dev)
5702 struct tg3 *tp = netdev_priv(dev);
5703 u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
5706 struct tg3_napi *tnapi;
5707 struct netdev_queue *txq;
5708 unsigned int i, last;
5711 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5712 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;
5716 /* We are running in BH disabled context with netif_tx_lock
5717 * and TX reclaim runs via tp->napi.poll inside of a software
5718 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
5721 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5722 if (!netif_tx_queue_stopped(txq)) {
5723 netif_tx_stop_queue(txq);
5725 /* This is a hard error, log it. */
5726 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5727 "queue awake!\n", dev->name);
5729 return NETDEV_TX_BUSY;
	entry = tnapi->tx_prod;
	base_flags = 0;
5734 if (skb->ip_summed == CHECKSUM_PARTIAL)
5735 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5737 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		u32 tcp_opt_len, ip_tcp_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}
5747 tcp_opt_len = tcp_optlen(skb);
5748 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5750 hdr_len = ip_tcp_len + tcp_opt_len;
5751 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5752 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return tg3_tso_bug(tp, skb);
5755 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5756 TXD_FLAG_CPU_POST_DMA);
		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
5761 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {