/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.106"
#define DRV_MODULE_RELDATE	"January 12, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
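
/* Illustrative note (a sketch, not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, the masked increment in NEXT_TX
 * behaves exactly like a modulo, without a hardware divide:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0	(wraps to the start of the ring)
 *
 * i.e. NEXT_TX(n) == (n + 1) % TG3_TX_RING_SIZE for all n in range.
 */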
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
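
/* Worked example (sketch, not in the original source): with the default
 * of TG3_DEF_TX_RING_PENDING (511) descriptors pending,
 * TG3_TX_WAKEUP_THRESH evaluates to 511 / 4 = 127, so the queue is only
 * woken once at least 127 descriptors are free again.
 */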
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
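
/* Usage example (sketch, assumed): the bitmap maps to the standard
 * NETIF_MSG_* flags, so loading the module with e.g.
 *
 *	modprobe tg3 tg3_debug=0x3
 *
 * would enable NETIF_MSG_DRV | NETIF_MSG_PROBE, while the default of -1
 * keeps TG3_DEF_MSG_ENABLE.
 */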
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
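
/* Illustrative sketch (not in the original source): tw32() posts a
 * write that PCI bridges may buffer, while tw32_f() reads the register
 * back so the write is guaranteed to have reached the chip before the
 * caller proceeds:
 *
 *	tw32(MAC_MODE, tp->mac_mode);	 may still be in flight
 *	tw32_f(MAC_MODE, tp->mac_mode);	 flushed by the read-back
 */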
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
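
/* Usage sketch (assumed, not from the original file): lock and unlock
 * calls are paired around state shared with the APE firmware, e.g.:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... access the shared region ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */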
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
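
/* Usage sketch (assumed, not from the original file): tg3_readphy() and
 * tg3_writephy() return 0 on success and -EBUSY if the MI interface
 * stays busy, so a typical read-modify-write of a PHY register is:
 *
 *	u32 tmp;
 *	if (!tg3_readphy(tp, MII_BMCR, &tmp))
 *		tg3_writephy(tp, MII_BMCR, tmp | BMCR_ANRESTART);
 */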
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
		       tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
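
/* Worked example (sketch, not in the original source): if 1000 usec of
 * the 2500 usec firmware event window remain, delay_cnt becomes
 * (1000 >> 3) + 1 = 126 polls of 8 usec each, so the loop waits roughly
 * the remaining time and no longer.
 */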
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
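
/* Illustrative truth table (sketch, not in the original source) for the
 * resolution above when symmetric pause is advertised locally
 * (ADVERTISE_1000XPAUSE set, ADVERTISE_1000XPSE_ASYM clear):
 *
 *	remote LPA_1000XPAUSE set   -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	remote LPA_1000XPAUSE clear -> 0 (no pause frames used)
 */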
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
2168 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2172 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2174 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2175 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2178 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2179 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2180 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2187 val = tr32(GRC_MISC_CFG);
2188 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2191 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2192 u32 phytest;
2193 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2194 u32 phy;
2196 tg3_writephy(tp, MII_ADVERTISE, 0);
2197 tg3_writephy(tp, MII_BMCR,
2198 BMCR_ANENABLE | BMCR_ANRESTART);
2200 tg3_writephy(tp, MII_TG3_FET_TEST,
2201 phytest | MII_TG3_FET_SHADOW_EN);
2202 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2203 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2204 tg3_writephy(tp,
2205 MII_TG3_FET_SHDW_AUXMODE4,
2206 phy);
2207 }
2208 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2209 }
2210 return;
2211 } else if (do_low_power) {
2212 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2213 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2215 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2216 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2217 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2218 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2219 MII_TG3_AUXCTL_PCTL_VREG_11V);
2222 /* The PHY should not be powered down on some chips because
2223 * of bugs.
2224 */
2225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2227 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2228 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2229 return;
2231 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2232 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2233 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2234 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2235 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2236 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2239 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2242 /* tp->lock is held. */
2243 static int tg3_nvram_lock(struct tg3 *tp)
2244 {
2245 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2246 int i;
2248 if (tp->nvram_lock_cnt == 0) {
2249 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2250 for (i = 0; i < 8000; i++) {
2251 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2252 break;
2253 udelay(20);
2254 }
2255 if (i == 8000) {
2256 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2257 return -ENODEV;
2258 }
2259 }
2260 tp->nvram_lock_cnt++;
2261 }
2262 return 0;
2263 }
2265 /* tp->lock is held. */
2266 static void tg3_nvram_unlock(struct tg3 *tp)
2267 {
2268 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2269 if (tp->nvram_lock_cnt > 0)
2270 tp->nvram_lock_cnt--;
2271 if (tp->nvram_lock_cnt == 0)
2272 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2273 }
2274 }
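/* Usage sketch (not driver code): the two helpers above form a counted
 * lock, so nested NVRAM users can pair lock/unlock calls freely and
 * only the outermost pair touches the SWARB hardware arbiter.  The
 * function name is illustrative.
 */
static void example_nested_nvram_users(struct tg3 *tp)
{
	if (tg3_nvram_lock(tp))		/* cnt 0 -> 1, acquires SWARB_GNT1 */
		return;			/* arbiter never granted */
	tg3_nvram_lock(tp);		/* cnt 1 -> 2, hardware untouched */
	/* ... nested NVRAM accesses ... */
	tg3_nvram_unlock(tp);		/* cnt 2 -> 1, hardware untouched */
	tg3_nvram_unlock(tp);		/* cnt 1 -> 0, releases the arbiter */
}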
2276 /* tp->lock is held. */
2277 static void tg3_enable_nvram_access(struct tg3 *tp)
2278 {
2279 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2280 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2281 u32 nvaccess = tr32(NVRAM_ACCESS);
2283 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2284 }
2285 }
2287 /* tp->lock is held. */
2288 static void tg3_disable_nvram_access(struct tg3 *tp)
2289 {
2290 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2291 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2292 u32 nvaccess = tr32(NVRAM_ACCESS);
2294 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2295 }
2296 }
2298 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2299 u32 offset, u32 *val)
2300 {
2301 u32 tmp;
2302 int i;
2304 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2305 return -EINVAL;
2307 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2308 EEPROM_ADDR_DEVID_MASK |
2309 EEPROM_ADDR_READ);
2310 tw32(GRC_EEPROM_ADDR,
2311 tmp |
2312 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2313 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2314 EEPROM_ADDR_ADDR_MASK) |
2315 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2317 for (i = 0; i < 1000; i++) {
2318 tmp = tr32(GRC_EEPROM_ADDR);
2320 if (tmp & EEPROM_ADDR_COMPLETE)
2321 break;
2322 msleep(1);
2323 }
2324 if (!(tmp & EEPROM_ADDR_COMPLETE))
2325 return -EBUSY;
2327 tmp = tr32(GRC_EEPROM_DATA);
2329 /*
2330 * The data will always be opposite the native endian
2331 * format. Perform a blind byteswap to compensate.
2332 */
2333 *val = swab32(tmp);
2335 return 0;
2336 }
2338 #define NVRAM_CMD_TIMEOUT 10000
2340 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2341 {
2342 int i;
2344 tw32(NVRAM_CMD, nvram_cmd);
2345 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2346 udelay(10);
2347 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2348 udelay(10);
2349 break;
2350 }
2351 }
2353 if (i == NVRAM_CMD_TIMEOUT)
2354 return -EBUSY;
2356 return 0;
2357 }
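/* Sketch (not driver code): the poll-then-timeout idiom used by
 * tg3_nvram_exec_cmd() above and by several other register handshakes
 * in this file, factored out.  The callback shape and iteration count
 * are illustrative only.
 */
static int example_poll_until(int (*done)(void *arg), void *arg, int max_iters)
{
	int i;

	for (i = 0; i < max_iters; i++) {
		if (done(arg))
			return 0;	/* handshake completed */
		/* udelay(10) between polls in driver context */
	}
	return -1;			/* timed out; the driver returns -EBUSY */
}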
2359 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2360 {
2361 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2362 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2363 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2364 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2365 (tp->nvram_jedecnum == JEDEC_ATMEL))
2367 addr = ((addr / tp->nvram_pagesize) <<
2368 ATMEL_AT45DB0X1B_PAGE_POS) +
2369 (addr % tp->nvram_pagesize);
2371 return addr;
2372 }
2374 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2375 {
2376 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2377 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2378 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2379 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2380 (tp->nvram_jedecnum == JEDEC_ATMEL))
2382 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2383 tp->nvram_pagesize) +
2384 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2386 return addr;
2387 }
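/* Sketch (not driver code): the two translations above in isolation,
 * assuming the 264-byte page size of the Atmel AT45DB0X1B parts and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 (both assumptions for illustration).
 * A linear NVRAM offset splits into page * 512 + intra-page offset,
 * and the two helpers are exact inverses of each other:
 */
static u32 example_at45db_phys(u32 addr, u32 pagesize)
{
	return ((addr / pagesize) << 9) + (addr % pagesize);
}

static u32 example_at45db_logical(u32 addr, u32 pagesize)
{
	return ((addr >> 9) * pagesize) + (addr & ((1 << 9) - 1));
}
/* e.g. example_at45db_phys(1000, 264) == (3 << 9) + 208 == 1744, and
 * example_at45db_logical(1744, 264) == 3 * 264 + 208 == 1000 again.
 */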
2389 /* NOTE: Data read in from NVRAM is byteswapped according to
2390 * the byteswapping settings for all other register accesses.
2391 * tg3 devices are BE devices, so on a BE machine, the data
2392 * returned will be exactly as it is seen in NVRAM. On a LE
2393 * machine, the 32-bit value will be byteswapped.
2395 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2396 {
2397 int ret;
2399 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2400 return tg3_nvram_read_using_eeprom(tp, offset, val);
2402 offset = tg3_nvram_phys_addr(tp, offset);
2404 if (offset > NVRAM_ADDR_MSK)
2405 return -EINVAL;
2407 ret = tg3_nvram_lock(tp);
2408 if (ret)
2409 return ret;
2411 tg3_enable_nvram_access(tp);
2413 tw32(NVRAM_ADDR, offset);
2414 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2415 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2417 if (ret == 0)
2418 *val = tr32(NVRAM_RDDATA);
2420 tg3_disable_nvram_access(tp);
2422 tg3_nvram_unlock(tp);
2424 return ret;
2425 }
2427 /* Ensures NVRAM data is in bytestream format. */
2428 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2429 {
2430 u32 v;
2431 int res = tg3_nvram_read(tp, offset, &v);
2432 if (!res)
2433 *val = cpu_to_be32(v);
2434 return res;
2435 }
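/* Illustrative sketch (not driver code): what the NOTE above means for
 * tg3_nvram_read_be32().  If the four bytes in NVRAM are
 * 0x12 0x34 0x56 0x78, tg3_nvram_read() yields host-order 0x12345678
 * on a big-endian machine and 0x78563412 on a little-endian one;
 * cpu_to_be32() is a no-op in the first case and a byteswap in the
 * second, so storing the result always reproduces the NVRAM byte
 * order.  The function name is hypothetical.
 */
static void example_nvram_bytestream(struct tg3 *tp, u32 offset, u8 *buf)
{
	__be32 v;

	if (tg3_nvram_read_be32(tp, offset, &v) == 0)
		memcpy(buf, &v, sizeof(v));	/* buf now matches NVRAM bytes */
}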
2437 /* tp->lock is held. */
2438 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2440 u32 addr_high, addr_low;
2443 addr_high = ((tp->dev->dev_addr[0] << 8) |
2444 tp->dev->dev_addr[1]);
2445 addr_low = ((tp->dev->dev_addr[2] << 24) |
2446 (tp->dev->dev_addr[3] << 16) |
2447 (tp->dev->dev_addr[4] << 8) |
2448 (tp->dev->dev_addr[5] << 0));
2449 for (i = 0; i < 4; i++) {
2450 if (i == 1 && skip_mac_1)
2452 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2453 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2458 for (i = 0; i < 12; i++) {
2459 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2460 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2464 addr_high = (tp->dev->dev_addr[0] +
2465 tp->dev->dev_addr[1] +
2466 tp->dev->dev_addr[2] +
2467 tp->dev->dev_addr[3] +
2468 tp->dev->dev_addr[4] +
2469 tp->dev->dev_addr[5]) &
2470 TX_BACKOFF_SEED_MASK;
2471 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2472 }
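/* Sketch (not driver code): the packing used by __tg3_set_mac_addr()
 * above.  The six MAC address bytes split across two registers: bytes
 * 0-1 in the low half of the HIGH register, bytes 2-5 big-endian style
 * in the LOW register.  The function name is illustrative.
 */
static void example_pack_mac_addr(const u8 *mac, u32 *addr_high, u32 *addr_low)
{
	*addr_high = (mac[0] << 8) | mac[1];
	*addr_low  = (mac[2] << 24) | (mac[3] << 16) |
		     (mac[4] << 8)  |  mac[5];
}
/* For 00:10:18:aa:bb:cc this yields addr_high == 0x00000010 and
 * addr_low == 0x18aabbcc. */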
2474 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2475 {
2476 u32 misc_host_ctrl;
2477 bool device_should_wake, do_low_power;
2479 /* Make sure register accesses (indirect or otherwise)
2480 * will function correctly.
2481 */
2482 pci_write_config_dword(tp->pdev,
2483 TG3PCI_MISC_HOST_CTRL,
2484 tp->misc_host_ctrl);
2486 switch (state) {
2487 case PCI_D0:
2488 pci_enable_wake(tp->pdev, state, false);
2489 pci_set_power_state(tp->pdev, PCI_D0);
2491 /* Switch out of Vaux if it is a NIC */
2492 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2493 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2495 return 0;
2497 case PCI_D1:
2498 case PCI_D2:
2499 case PCI_D3hot:
2500 break;
2502 default:
2503 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2504 tp->dev->name, state);
2505 return -EINVAL;
2506 }
2508 /* Restore the CLKREQ setting. */
2509 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2512 pci_read_config_word(tp->pdev,
2513 tp->pcie_cap + PCI_EXP_LNKCTL,
2515 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2516 pci_write_config_word(tp->pdev,
2517 tp->pcie_cap + PCI_EXP_LNKCTL,
2521 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2522 tw32(TG3PCI_MISC_HOST_CTRL,
2523 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2525 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2526 device_may_wakeup(&tp->pdev->dev) &&
2527 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2529 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2530 do_low_power = false;
2531 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2532 !tp->link_config.phy_is_low_power) {
2533 struct phy_device *phydev;
2534 u32 phyid, advertising;
2536 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2538 tp->link_config.phy_is_low_power = 1;
2540 tp->link_config.orig_speed = phydev->speed;
2541 tp->link_config.orig_duplex = phydev->duplex;
2542 tp->link_config.orig_autoneg = phydev->autoneg;
2543 tp->link_config.orig_advertising = phydev->advertising;
2545 advertising = ADVERTISED_TP |
2547 ADVERTISED_Autoneg |
2548 ADVERTISED_10baseT_Half;
2550 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2551 device_should_wake) {
2552 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2554 ADVERTISED_100baseT_Half |
2555 ADVERTISED_100baseT_Full |
2556 ADVERTISED_10baseT_Full;
2558 advertising |= ADVERTISED_10baseT_Full;
2561 phydev->advertising = advertising;
2563 phy_start_aneg(phydev);
2565 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2566 if (phyid != TG3_PHY_ID_BCMAC131) {
2567 phyid &= TG3_PHY_OUI_MASK;
2568 if (phyid == TG3_PHY_OUI_1 ||
2569 phyid == TG3_PHY_OUI_2 ||
2570 phyid == TG3_PHY_OUI_3)
2571 do_low_power = true;
2575 do_low_power = true;
2577 if (tp->link_config.phy_is_low_power == 0) {
2578 tp->link_config.phy_is_low_power = 1;
2579 tp->link_config.orig_speed = tp->link_config.speed;
2580 tp->link_config.orig_duplex = tp->link_config.duplex;
2581 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2584 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2585 tp->link_config.speed = SPEED_10;
2586 tp->link_config.duplex = DUPLEX_HALF;
2587 tp->link_config.autoneg = AUTONEG_ENABLE;
2588 tg3_setup_phy(tp, 0);
2592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2595 val = tr32(GRC_VCPU_EXT_CTRL);
2596 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2597 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2601 for (i = 0; i < 200; i++) {
2602 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2603 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2608 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2609 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2610 WOL_DRV_STATE_SHUTDOWN |
2614 if (device_should_wake) {
2617 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2619 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2623 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2624 mac_mode = MAC_MODE_PORT_MODE_GMII;
2626 mac_mode = MAC_MODE_PORT_MODE_MII;
2628 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2629 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2631 u32 speed = (tp->tg3_flags &
2632 TG3_FLAG_WOL_SPEED_100MB) ?
2633 SPEED_100 : SPEED_10;
2634 if (tg3_5700_link_polarity(tp, speed))
2635 mac_mode |= MAC_MODE_LINK_POLARITY;
2637 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2640 mac_mode = MAC_MODE_PORT_MODE_TBI;
2643 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2644 tw32(MAC_LED_CTRL, tp->led_ctrl);
2646 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2647 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2648 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2649 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2650 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2651 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2653 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2654 mac_mode |= tp->mac_mode &
2655 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2656 if (mac_mode & MAC_MODE_APE_TX_EN)
2657 mac_mode |= MAC_MODE_TDE_ENABLE;
2660 tw32_f(MAC_MODE, mac_mode);
2663 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2667 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2668 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2672 base_val = tp->pci_clock_ctrl;
2673 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2674 CLOCK_CTRL_TXCLK_DISABLE);
2676 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2677 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2678 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2679 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2680 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2682 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2683 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2684 u32 newbits1, newbits2;
2686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2688 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2689 CLOCK_CTRL_TXCLK_DISABLE |
2691 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2692 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2693 newbits1 = CLOCK_CTRL_625_CORE;
2694 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2696 newbits1 = CLOCK_CTRL_ALTCLK;
2697 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2700 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2703 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2706 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2711 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2712 CLOCK_CTRL_TXCLK_DISABLE |
2713 CLOCK_CTRL_44MHZ_CORE);
2715 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2718 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2719 tp->pci_clock_ctrl | newbits3, 40);
2723 if (!(device_should_wake) &&
2724 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2725 tg3_power_down_phy(tp, do_low_power);
2727 tg3_frob_aux_power(tp);
2729 /* Workaround for unstable PLL clock */
2730 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2731 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2732 u32 val = tr32(0x7d00);
2734 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2736 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2739 err = tg3_nvram_lock(tp);
2740 tg3_halt_cpu(tp, RX_CPU_BASE);
2742 tg3_nvram_unlock(tp);
2746 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2748 if (device_should_wake)
2749 pci_enable_wake(tp->pdev, state, true);
2751 /* Finally, set the new power state. */
2752 pci_set_power_state(tp->pdev, state);
2757 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2758 {
2759 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2760 case MII_TG3_AUX_STAT_10HALF:
2761 *speed = SPEED_10;
2762 *duplex = DUPLEX_HALF;
2763 break;
2765 case MII_TG3_AUX_STAT_10FULL:
2766 *speed = SPEED_10;
2767 *duplex = DUPLEX_FULL;
2768 break;
2770 case MII_TG3_AUX_STAT_100HALF:
2771 *speed = SPEED_100;
2772 *duplex = DUPLEX_HALF;
2773 break;
2775 case MII_TG3_AUX_STAT_100FULL:
2776 *speed = SPEED_100;
2777 *duplex = DUPLEX_FULL;
2778 break;
2780 case MII_TG3_AUX_STAT_1000HALF:
2781 *speed = SPEED_1000;
2782 *duplex = DUPLEX_HALF;
2783 break;
2785 case MII_TG3_AUX_STAT_1000FULL:
2786 *speed = SPEED_1000;
2787 *duplex = DUPLEX_FULL;
2788 break;
2790 default:
2791 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2792 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2793 SPEED_10;
2794 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2795 DUPLEX_HALF;
2796 break;
2797 }
2798 *speed = SPEED_INVALID;
2799 *duplex = DUPLEX_INVALID;
2800 break;
2801 }
2802 }
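/* Usage sketch (not driver code): a decoded sample for the helper
 * above.  An aux status word whose speed field reads
 * MII_TG3_AUX_STAT_100FULL comes back as SPEED_100 / DUPLEX_FULL.
 * The function name is illustrative.
 */
static void example_decode_aux_stat(struct tg3 *tp)
{
	u16 speed;
	u8 duplex;

	tg3_aux_stat_to_speed_duplex(tp, MII_TG3_AUX_STAT_100FULL,
				     &speed, &duplex);
	/* speed == SPEED_100, duplex == DUPLEX_FULL here */
}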
2804 static void tg3_phy_copper_begin(struct tg3 *tp)
2809 if (tp->link_config.phy_is_low_power) {
2810 /* Entering low power mode. Disable gigabit and
2811 * 100baseT advertisements.
2812 */
2813 tg3_writephy(tp, MII_TG3_CTRL, 0);
2815 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2816 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2817 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2818 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2820 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2821 } else if (tp->link_config.speed == SPEED_INVALID) {
2822 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2823 tp->link_config.advertising &=
2824 ~(ADVERTISED_1000baseT_Half |
2825 ADVERTISED_1000baseT_Full);
2827 new_adv = ADVERTISE_CSMA;
2828 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2829 new_adv |= ADVERTISE_10HALF;
2830 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2831 new_adv |= ADVERTISE_10FULL;
2832 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2833 new_adv |= ADVERTISE_100HALF;
2834 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2835 new_adv |= ADVERTISE_100FULL;
2837 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2839 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2841 if (tp->link_config.advertising &
2842 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2844 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2845 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2846 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2847 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2848 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2849 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2850 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2851 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2852 MII_TG3_CTRL_ENABLE_AS_MASTER);
2853 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2855 tg3_writephy(tp, MII_TG3_CTRL, 0);
2858 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2859 new_adv |= ADVERTISE_CSMA;
2861 /* Asking for a specific link mode. */
2862 if (tp->link_config.speed == SPEED_1000) {
2863 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2865 if (tp->link_config.duplex == DUPLEX_FULL)
2866 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2868 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2869 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2870 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2871 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2872 MII_TG3_CTRL_ENABLE_AS_MASTER);
2874 if (tp->link_config.speed == SPEED_100) {
2875 if (tp->link_config.duplex == DUPLEX_FULL)
2876 new_adv |= ADVERTISE_100FULL;
2878 new_adv |= ADVERTISE_100HALF;
2880 if (tp->link_config.duplex == DUPLEX_FULL)
2881 new_adv |= ADVERTISE_10FULL;
2883 new_adv |= ADVERTISE_10HALF;
2885 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2890 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2893 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2894 tp->link_config.speed != SPEED_INVALID) {
2895 u32 bmcr, orig_bmcr;
2897 tp->link_config.active_speed = tp->link_config.speed;
2898 tp->link_config.active_duplex = tp->link_config.duplex;
2900 bmcr = 0;
2901 switch (tp->link_config.speed) {
2902 default:
2903 case SPEED_10:
2904 break;
2906 case SPEED_100:
2907 bmcr |= BMCR_SPEED100;
2908 break;
2910 case SPEED_1000:
2911 bmcr |= TG3_BMCR_SPEED1000;
2912 break;
2913 }
2915 if (tp->link_config.duplex == DUPLEX_FULL)
2916 bmcr |= BMCR_FULLDPLX;
2918 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2919 (bmcr != orig_bmcr)) {
2920 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2921 for (i = 0; i < 1500; i++) {
2922 u32 tmp;
2924 udelay(10);
2925 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2926 tg3_readphy(tp, MII_BMSR, &tmp))
2927 continue;
2928 if (!(tmp & BMSR_LSTATUS)) {
2929 udelay(40);
2930 break;
2931 }
2932 }
2933 tg3_writephy(tp, MII_BMCR, bmcr);
2937 tg3_writephy(tp, MII_BMCR,
2938 BMCR_ANENABLE | BMCR_ANRESTART);
2942 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2946 /* Turn off tap power management. */
2947 /* Set Extended packet length bit */
2948 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2950 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2951 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2953 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2954 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2956 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2957 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2959 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2960 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2962 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2963 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2970 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2972 u32 adv_reg, all_mask = 0;
2974 if (mask & ADVERTISED_10baseT_Half)
2975 all_mask |= ADVERTISE_10HALF;
2976 if (mask & ADVERTISED_10baseT_Full)
2977 all_mask |= ADVERTISE_10FULL;
2978 if (mask & ADVERTISED_100baseT_Half)
2979 all_mask |= ADVERTISE_100HALF;
2980 if (mask & ADVERTISED_100baseT_Full)
2981 all_mask |= ADVERTISE_100FULL;
2983 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2986 if ((adv_reg & all_mask) != all_mask)
2988 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2992 if (mask & ADVERTISED_1000baseT_Half)
2993 all_mask |= ADVERTISE_1000HALF;
2994 if (mask & ADVERTISED_1000baseT_Full)
2995 all_mask |= ADVERTISE_1000FULL;
2997 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3000 if ((tg3_ctrl & all_mask) != all_mask)
3006 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3007 {
3008 u32 curadv, reqadv;
3010 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3011 return 1;
3013 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3014 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3016 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3017 if (curadv != reqadv)
3018 return 0;
3020 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3021 tg3_readphy(tp, MII_LPA, rmtadv);
3022 } else {
3023 /* Reprogram the advertisement register, even if it
3024 * does not affect the current link. If the link
3025 * gets renegotiated in the future, we can save an
3026 * additional renegotiation cycle by advertising
3027 * it correctly in the first place.
3028 */
3029 if (curadv != reqadv) {
3030 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3031 ADVERTISE_PAUSE_ASYM);
3032 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3033 }
3034 }
3036 return 1;
3037 }
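/* Sketch: what the code above assumes the (unlisted) helper
 * tg3_advert_flowctrl_1000T() produces, following the standard 802.3
 * Annex 28B pause encoding.  This is an assumption for illustration,
 * not a copy of the helper:
 */
static u16 example_advert_flowctrl_1000T(u8 flow_ctrl)
{
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		return ADVERTISE_PAUSE_CAP;		/* symmetric pause */
	else if (flow_ctrl & FLOW_CTRL_TX)
		return ADVERTISE_PAUSE_ASYM;		/* send pause only */
	else if (flow_ctrl & FLOW_CTRL_RX)
		return ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	return 0;
}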
3039 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3040 {
3041 int current_link_up;
3042 u32 bmsr, dummy;
3043 u32 lcl_adv, rmt_adv;
3044 u16 current_speed;
3045 u8 current_duplex;
3046 int i, err;
3048 tw32(MAC_EVENT, 0);
3050 tw32_f(MAC_STATUS,
3051 (MAC_STATUS_SYNC_CHANGED |
3052 MAC_STATUS_CFG_CHANGED |
3053 MAC_STATUS_MI_COMPLETION |
3054 MAC_STATUS_LNKSTATE_CHANGED));
3055 udelay(40);
3057 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3058 tw32_f(MAC_MI_MODE,
3059 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3060 udelay(80);
3061 }
3063 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3065 /* Some third-party PHYs need to be reset on link going
3068 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3071 netif_carrier_ok(tp->dev)) {
3072 tg3_readphy(tp, MII_BMSR, &bmsr);
3073 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3074 !(bmsr & BMSR_LSTATUS))
3080 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3081 tg3_readphy(tp, MII_BMSR, &bmsr);
3082 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3083 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3086 if (!(bmsr & BMSR_LSTATUS)) {
3087 err = tg3_init_5401phy_dsp(tp);
3091 tg3_readphy(tp, MII_BMSR, &bmsr);
3092 for (i = 0; i < 1000; i++) {
3094 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3095 (bmsr & BMSR_LSTATUS)) {
3101 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3102 !(bmsr & BMSR_LSTATUS) &&
3103 tp->link_config.active_speed == SPEED_1000) {
3104 err = tg3_phy_reset(tp);
3106 err = tg3_init_5401phy_dsp(tp);
3111 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3112 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3113 /* 5701 {A0,B0} CRC bug workaround */
3114 tg3_writephy(tp, 0x15, 0x0a75);
3115 tg3_writephy(tp, 0x1c, 0x8c68);
3116 tg3_writephy(tp, 0x1c, 0x8d68);
3117 tg3_writephy(tp, 0x1c, 0x8c68);
3120 /* Clear pending interrupts... */
3121 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3122 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3124 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3125 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3126 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3127 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3131 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3132 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3133 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3135 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3138 current_link_up = 0;
3139 current_speed = SPEED_INVALID;
3140 current_duplex = DUPLEX_INVALID;
3142 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3145 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3146 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3147 if (!(val & (1 << 10))) {
3148 val |= (1 << 10);
3149 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3150 goto relink;
3151 }
3152 }
3154 bmsr = 0;
3155 for (i = 0; i < 100; i++) {
3156 tg3_readphy(tp, MII_BMSR, &bmsr);
3157 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3158 (bmsr & BMSR_LSTATUS))
3163 if (bmsr & BMSR_LSTATUS) {
3166 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3167 for (i = 0; i < 2000; i++) {
3169 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3174 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3179 for (i = 0; i < 200; i++) {
3180 tg3_readphy(tp, MII_BMCR, &bmcr);
3181 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3183 if (bmcr && bmcr != 0x7fff)
3191 tp->link_config.active_speed = current_speed;
3192 tp->link_config.active_duplex = current_duplex;
3194 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3195 if ((bmcr & BMCR_ANENABLE) &&
3196 tg3_copper_is_advertising_all(tp,
3197 tp->link_config.advertising)) {
3198 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3200 current_link_up = 1;
3203 if (!(bmcr & BMCR_ANENABLE) &&
3204 tp->link_config.speed == current_speed &&
3205 tp->link_config.duplex == current_duplex &&
3206 tp->link_config.flowctrl ==
3207 tp->link_config.active_flowctrl) {
3208 current_link_up = 1;
3212 if (current_link_up == 1 &&
3213 tp->link_config.active_duplex == DUPLEX_FULL)
3214 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3217 relink:
3218 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3219 u32 tmp;
3221 tg3_phy_copper_begin(tp);
3223 tg3_readphy(tp, MII_BMSR, &tmp);
3224 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3225 (tmp & BMSR_LSTATUS))
3226 current_link_up = 1;
3229 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3230 if (current_link_up == 1) {
3231 if (tp->link_config.active_speed == SPEED_100 ||
3232 tp->link_config.active_speed == SPEED_10)
3233 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3235 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3236 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3237 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3239 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3241 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3242 if (tp->link_config.active_duplex == DUPLEX_HALF)
3243 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3246 if (current_link_up == 1 &&
3247 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3248 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3250 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3253 /* ??? Without this setting Netgear GA302T PHY does not
3254 * ??? send/receive packets...
3256 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3257 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3258 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3259 tw32_f(MAC_MI_MODE, tp->mi_mode);
3263 tw32_f(MAC_MODE, tp->mac_mode);
3266 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3267 /* Polled via timer. */
3268 tw32_f(MAC_EVENT, 0);
3270 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3275 current_link_up == 1 &&
3276 tp->link_config.active_speed == SPEED_1000 &&
3277 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3278 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3281 (MAC_STATUS_SYNC_CHANGED |
3282 MAC_STATUS_CFG_CHANGED));
3285 NIC_SRAM_FIRMWARE_MBOX,
3286 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3289 /* Prevent send BD corruption. */
3290 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3291 u16 oldlnkctl, newlnkctl;
3293 pci_read_config_word(tp->pdev,
3294 tp->pcie_cap + PCI_EXP_LNKCTL,
3296 if (tp->link_config.active_speed == SPEED_100 ||
3297 tp->link_config.active_speed == SPEED_10)
3298 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3300 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3301 if (newlnkctl != oldlnkctl)
3302 pci_write_config_word(tp->pdev,
3303 tp->pcie_cap + PCI_EXP_LNKCTL,
3307 if (current_link_up != netif_carrier_ok(tp->dev)) {
3308 if (current_link_up)
3309 netif_carrier_on(tp->dev);
3311 netif_carrier_off(tp->dev);
3312 tg3_link_report(tp);
3318 struct tg3_fiber_aneginfo {
3320 #define ANEG_STATE_UNKNOWN 0
3321 #define ANEG_STATE_AN_ENABLE 1
3322 #define ANEG_STATE_RESTART_INIT 2
3323 #define ANEG_STATE_RESTART 3
3324 #define ANEG_STATE_DISABLE_LINK_OK 4
3325 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3326 #define ANEG_STATE_ABILITY_DETECT 6
3327 #define ANEG_STATE_ACK_DETECT_INIT 7
3328 #define ANEG_STATE_ACK_DETECT 8
3329 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3330 #define ANEG_STATE_COMPLETE_ACK 10
3331 #define ANEG_STATE_IDLE_DETECT_INIT 11
3332 #define ANEG_STATE_IDLE_DETECT 12
3333 #define ANEG_STATE_LINK_OK 13
3334 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3335 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3338 #define MR_AN_ENABLE 0x00000001
3339 #define MR_RESTART_AN 0x00000002
3340 #define MR_AN_COMPLETE 0x00000004
3341 #define MR_PAGE_RX 0x00000008
3342 #define MR_NP_LOADED 0x00000010
3343 #define MR_TOGGLE_TX 0x00000020
3344 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3345 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3346 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3347 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3348 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3349 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3350 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3351 #define MR_TOGGLE_RX 0x00002000
3352 #define MR_NP_RX 0x00004000
3354 #define MR_LINK_OK 0x80000000
3356 unsigned long link_time, cur_time;
3358 u32 ability_match_cfg;
3359 int ability_match_count;
3361 char ability_match, idle_match, ack_match;
3363 u32 txconfig, rxconfig;
3364 #define ANEG_CFG_NP 0x00000080
3365 #define ANEG_CFG_ACK 0x00000040
3366 #define ANEG_CFG_RF2 0x00000020
3367 #define ANEG_CFG_RF1 0x00000010
3368 #define ANEG_CFG_PS2 0x00000001
3369 #define ANEG_CFG_PS1 0x00008000
3370 #define ANEG_CFG_HD 0x00004000
3371 #define ANEG_CFG_FD 0x00002000
3372 #define ANEG_CFG_INVAL 0x00001f06
3377 #define ANEG_TIMER_ENAB 2
3378 #define ANEG_FAILED -1
3380 #define ANEG_STATE_SETTLE_TIME 10000
3382 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3383 struct tg3_fiber_aneginfo *ap)
3386 unsigned long delta;
3390 if (ap->state == ANEG_STATE_UNKNOWN) {
3394 ap->ability_match_cfg = 0;
3395 ap->ability_match_count = 0;
3396 ap->ability_match = 0;
3402 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3403 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3405 if (rx_cfg_reg != ap->ability_match_cfg) {
3406 ap->ability_match_cfg = rx_cfg_reg;
3407 ap->ability_match = 0;
3408 ap->ability_match_count = 0;
3410 if (++ap->ability_match_count > 1) {
3411 ap->ability_match = 1;
3412 ap->ability_match_cfg = rx_cfg_reg;
3415 if (rx_cfg_reg & ANEG_CFG_ACK)
3423 ap->ability_match_cfg = 0;
3424 ap->ability_match_count = 0;
3425 ap->ability_match = 0;
3431 ap->rxconfig = rx_cfg_reg;
3435 case ANEG_STATE_UNKNOWN:
3436 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3437 ap->state = ANEG_STATE_AN_ENABLE;
3440 case ANEG_STATE_AN_ENABLE:
3441 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3442 if (ap->flags & MR_AN_ENABLE) {
3445 ap->ability_match_cfg = 0;
3446 ap->ability_match_count = 0;
3447 ap->ability_match = 0;
3451 ap->state = ANEG_STATE_RESTART_INIT;
3453 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3457 case ANEG_STATE_RESTART_INIT:
3458 ap->link_time = ap->cur_time;
3459 ap->flags &= ~(MR_NP_LOADED);
3461 tw32(MAC_TX_AUTO_NEG, 0);
3462 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3463 tw32_f(MAC_MODE, tp->mac_mode);
3466 ret = ANEG_TIMER_ENAB;
3467 ap->state = ANEG_STATE_RESTART;
3470 case ANEG_STATE_RESTART:
3471 delta = ap->cur_time - ap->link_time;
3472 if (delta > ANEG_STATE_SETTLE_TIME) {
3473 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3475 ret = ANEG_TIMER_ENAB;
3479 case ANEG_STATE_DISABLE_LINK_OK:
3483 case ANEG_STATE_ABILITY_DETECT_INIT:
3484 ap->flags &= ~(MR_TOGGLE_TX);
3485 ap->txconfig = ANEG_CFG_FD;
3486 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3487 if (flowctrl & ADVERTISE_1000XPAUSE)
3488 ap->txconfig |= ANEG_CFG_PS1;
3489 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3490 ap->txconfig |= ANEG_CFG_PS2;
3491 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3492 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3493 tw32_f(MAC_MODE, tp->mac_mode);
3496 ap->state = ANEG_STATE_ABILITY_DETECT;
3499 case ANEG_STATE_ABILITY_DETECT:
3500 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3501 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3505 case ANEG_STATE_ACK_DETECT_INIT:
3506 ap->txconfig |= ANEG_CFG_ACK;
3507 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3508 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3509 tw32_f(MAC_MODE, tp->mac_mode);
3512 ap->state = ANEG_STATE_ACK_DETECT;
3515 case ANEG_STATE_ACK_DETECT:
3516 if (ap->ack_match != 0) {
3517 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3518 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3519 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3521 ap->state = ANEG_STATE_AN_ENABLE;
3523 } else if (ap->ability_match != 0 &&
3524 ap->rxconfig == 0) {
3525 ap->state = ANEG_STATE_AN_ENABLE;
3529 case ANEG_STATE_COMPLETE_ACK_INIT:
3530 if (ap->rxconfig & ANEG_CFG_INVAL) {
3534 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3535 MR_LP_ADV_HALF_DUPLEX |
3536 MR_LP_ADV_SYM_PAUSE |
3537 MR_LP_ADV_ASYM_PAUSE |
3538 MR_LP_ADV_REMOTE_FAULT1 |
3539 MR_LP_ADV_REMOTE_FAULT2 |
3540 MR_LP_ADV_NEXT_PAGE |
3543 if (ap->rxconfig & ANEG_CFG_FD)
3544 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3545 if (ap->rxconfig & ANEG_CFG_HD)
3546 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3547 if (ap->rxconfig & ANEG_CFG_PS1)
3548 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3549 if (ap->rxconfig & ANEG_CFG_PS2)
3550 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3551 if (ap->rxconfig & ANEG_CFG_RF1)
3552 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3553 if (ap->rxconfig & ANEG_CFG_RF2)
3554 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3555 if (ap->rxconfig & ANEG_CFG_NP)
3556 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3558 ap->link_time = ap->cur_time;
3560 ap->flags ^= (MR_TOGGLE_TX);
3561 if (ap->rxconfig & 0x0008)
3562 ap->flags |= MR_TOGGLE_RX;
3563 if (ap->rxconfig & ANEG_CFG_NP)
3564 ap->flags |= MR_NP_RX;
3565 ap->flags |= MR_PAGE_RX;
3567 ap->state = ANEG_STATE_COMPLETE_ACK;
3568 ret = ANEG_TIMER_ENAB;
3571 case ANEG_STATE_COMPLETE_ACK:
3572 if (ap->ability_match != 0 &&
3573 ap->rxconfig == 0) {
3574 ap->state = ANEG_STATE_AN_ENABLE;
3577 delta = ap->cur_time - ap->link_time;
3578 if (delta > ANEG_STATE_SETTLE_TIME) {
3579 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3580 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3582 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3583 !(ap->flags & MR_NP_RX)) {
3584 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3592 case ANEG_STATE_IDLE_DETECT_INIT:
3593 ap->link_time = ap->cur_time;
3594 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3595 tw32_f(MAC_MODE, tp->mac_mode);
3598 ap->state = ANEG_STATE_IDLE_DETECT;
3599 ret = ANEG_TIMER_ENAB;
3602 case ANEG_STATE_IDLE_DETECT:
3603 if (ap->ability_match != 0 &&
3604 ap->rxconfig == 0) {
3605 ap->state = ANEG_STATE_AN_ENABLE;
3608 delta = ap->cur_time - ap->link_time;
3609 if (delta > ANEG_STATE_SETTLE_TIME) {
3610 /* XXX another gem from the Broadcom driver :( */
3611 ap->state = ANEG_STATE_LINK_OK;
3615 case ANEG_STATE_LINK_OK:
3616 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3620 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3621 /* ??? unimplemented */
3624 case ANEG_STATE_NEXT_PAGE_WAIT:
3625 /* ??? unimplemented */
3636 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3637 {
3638 int res = 0;
3639 struct tg3_fiber_aneginfo aninfo;
3640 int status = ANEG_FAILED;
3641 unsigned int tick;
3642 u32 tmp;
3644 tw32_f(MAC_TX_AUTO_NEG, 0);
3646 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3647 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3650 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3653 memset(&aninfo, 0, sizeof(aninfo));
3654 aninfo.flags |= MR_AN_ENABLE;
3655 aninfo.state = ANEG_STATE_UNKNOWN;
3656 aninfo.cur_time = 0;
3658 while (++tick < 195000) {
3659 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3660 if (status == ANEG_DONE || status == ANEG_FAILED)
3666 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3667 tw32_f(MAC_MODE, tp->mac_mode);
3670 *txflags = aninfo.txconfig;
3671 *rxflags = aninfo.flags;
3673 if (status == ANEG_DONE &&
3674 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3675 MR_LP_ADV_FULL_DUPLEX)))
3676 res = 1;
3678 return res;
3679 }
3681 static void tg3_init_bcm8002(struct tg3 *tp)
3683 u32 mac_status = tr32(MAC_STATUS);
3686 /* Reset when initting first time or we have a link. */
3687 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3688 !(mac_status & MAC_STATUS_PCS_SYNCED))
3691 /* Set PLL lock range. */
3692 tg3_writephy(tp, 0x16, 0x8007);
3695 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3697 /* Wait for reset to complete. */
3698 /* XXX schedule_timeout() ... */
3699 for (i = 0; i < 500; i++)
3702 /* Config mode; select PMA/Ch 1 regs. */
3703 tg3_writephy(tp, 0x10, 0x8411);
3705 /* Enable auto-lock and comdet, select txclk for tx. */
3706 tg3_writephy(tp, 0x11, 0x0a10);
3708 tg3_writephy(tp, 0x18, 0x00a0);
3709 tg3_writephy(tp, 0x16, 0x41ff);
3711 /* Assert and deassert POR. */
3712 tg3_writephy(tp, 0x13, 0x0400);
3714 tg3_writephy(tp, 0x13, 0x0000);
3716 tg3_writephy(tp, 0x11, 0x0a50);
3718 tg3_writephy(tp, 0x11, 0x0a10);
3720 /* Wait for signal to stabilize */
3721 /* XXX schedule_timeout() ... */
3722 for (i = 0; i < 15000; i++)
3725 /* Deselect the channel register so we can read the PHYID
3728 tg3_writephy(tp, 0x10, 0x8011);
3731 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3734 u32 sg_dig_ctrl, sg_dig_status;
3735 u32 serdes_cfg, expected_sg_dig_ctrl;
3736 int workaround, port_a;
3737 int current_link_up;
3740 expected_sg_dig_ctrl = 0;
3743 current_link_up = 0;
3745 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3746 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3748 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3751 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3752 /* preserve bits 20-23 for voltage regulator */
3753 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3756 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3758 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3759 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3761 u32 val = serdes_cfg;
3767 tw32_f(MAC_SERDES_CFG, val);
3770 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3772 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3773 tg3_setup_flow_control(tp, 0, 0);
3774 current_link_up = 1;
3779 /* Want auto-negotiation. */
3780 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3782 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3783 if (flowctrl & ADVERTISE_1000XPAUSE)
3784 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3785 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3786 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3788 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3789 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3790 tp->serdes_counter &&
3791 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3792 MAC_STATUS_RCVD_CFG)) ==
3793 MAC_STATUS_PCS_SYNCED)) {
3794 tp->serdes_counter--;
3795 current_link_up = 1;
3800 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3801 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3803 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3805 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3806 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3807 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3808 MAC_STATUS_SIGNAL_DET)) {
3809 sg_dig_status = tr32(SG_DIG_STATUS);
3810 mac_status = tr32(MAC_STATUS);
3812 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3813 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3814 u32 local_adv = 0, remote_adv = 0;
3816 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3817 local_adv |= ADVERTISE_1000XPAUSE;
3818 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3819 local_adv |= ADVERTISE_1000XPSE_ASYM;
3821 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3822 remote_adv |= LPA_1000XPAUSE;
3823 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3824 remote_adv |= LPA_1000XPAUSE_ASYM;
3826 tg3_setup_flow_control(tp, local_adv, remote_adv);
3827 current_link_up = 1;
3828 tp->serdes_counter = 0;
3829 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3830 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3831 if (tp->serdes_counter)
3832 tp->serdes_counter--;
3835 u32 val = serdes_cfg;
3842 tw32_f(MAC_SERDES_CFG, val);
3845 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3848 /* Link parallel detection - link is up */
3849 /* only if we have PCS_SYNC and not */
3850 /* receiving config code words */
3851 mac_status = tr32(MAC_STATUS);
3852 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3853 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3854 tg3_setup_flow_control(tp, 0, 0);
3855 current_link_up = 1;
3857 TG3_FLG2_PARALLEL_DETECT;
3858 tp->serdes_counter =
3859 SERDES_PARALLEL_DET_TIMEOUT;
3861 goto restart_autoneg;
3865 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3866 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3870 return current_link_up;
3873 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3875 int current_link_up = 0;
3877 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3880 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3881 u32 txflags, rxflags;
3884 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3885 u32 local_adv = 0, remote_adv = 0;
3887 if (txflags & ANEG_CFG_PS1)
3888 local_adv |= ADVERTISE_1000XPAUSE;
3889 if (txflags & ANEG_CFG_PS2)
3890 local_adv |= ADVERTISE_1000XPSE_ASYM;
3892 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3893 remote_adv |= LPA_1000XPAUSE;
3894 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3895 remote_adv |= LPA_1000XPAUSE_ASYM;
3897 tg3_setup_flow_control(tp, local_adv, remote_adv);
3899 current_link_up = 1;
3901 for (i = 0; i < 30; i++) {
3904 (MAC_STATUS_SYNC_CHANGED |
3905 MAC_STATUS_CFG_CHANGED));
3907 if ((tr32(MAC_STATUS) &
3908 (MAC_STATUS_SYNC_CHANGED |
3909 MAC_STATUS_CFG_CHANGED)) == 0)
3913 mac_status = tr32(MAC_STATUS);
3914 if (current_link_up == 0 &&
3915 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3916 !(mac_status & MAC_STATUS_RCVD_CFG))
3917 current_link_up = 1;
3919 tg3_setup_flow_control(tp, 0, 0);
3921 /* Forcing 1000FD link up. */
3922 current_link_up = 1;
3924 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3927 tw32_f(MAC_MODE, tp->mac_mode);
3932 return current_link_up;
3935 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3938 u16 orig_active_speed;
3939 u8 orig_active_duplex;
3941 int current_link_up;
3944 orig_pause_cfg = tp->link_config.active_flowctrl;
3945 orig_active_speed = tp->link_config.active_speed;
3946 orig_active_duplex = tp->link_config.active_duplex;
3948 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3949 netif_carrier_ok(tp->dev) &&
3950 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3951 mac_status = tr32(MAC_STATUS);
3952 mac_status &= (MAC_STATUS_PCS_SYNCED |
3953 MAC_STATUS_SIGNAL_DET |
3954 MAC_STATUS_CFG_CHANGED |
3955 MAC_STATUS_RCVD_CFG);
3956 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3957 MAC_STATUS_SIGNAL_DET)) {
3958 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3959 MAC_STATUS_CFG_CHANGED));
3964 tw32_f(MAC_TX_AUTO_NEG, 0);
3966 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3967 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3968 tw32_f(MAC_MODE, tp->mac_mode);
3971 if (tp->phy_id == PHY_ID_BCM8002)
3972 tg3_init_bcm8002(tp);
3974 /* Enable link change event even when serdes polling. */
3975 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3978 current_link_up = 0;
3979 mac_status = tr32(MAC_STATUS);
3981 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3982 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3984 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3986 tp->napi[0].hw_status->status =
3987 (SD_STATUS_UPDATED |
3988 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3990 for (i = 0; i < 100; i++) {
3991 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3992 MAC_STATUS_CFG_CHANGED));
3994 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3995 MAC_STATUS_CFG_CHANGED |
3996 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4000 mac_status = tr32(MAC_STATUS);
4001 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4002 current_link_up = 0;
4003 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4004 tp->serdes_counter == 0) {
4005 tw32_f(MAC_MODE, (tp->mac_mode |
4006 MAC_MODE_SEND_CONFIGS));
4008 tw32_f(MAC_MODE, tp->mac_mode);
4012 if (current_link_up == 1) {
4013 tp->link_config.active_speed = SPEED_1000;
4014 tp->link_config.active_duplex = DUPLEX_FULL;
4015 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4016 LED_CTRL_LNKLED_OVERRIDE |
4017 LED_CTRL_1000MBPS_ON));
4019 tp->link_config.active_speed = SPEED_INVALID;
4020 tp->link_config.active_duplex = DUPLEX_INVALID;
4021 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4022 LED_CTRL_LNKLED_OVERRIDE |
4023 LED_CTRL_TRAFFIC_OVERRIDE));
4026 if (current_link_up != netif_carrier_ok(tp->dev)) {
4027 if (current_link_up)
4028 netif_carrier_on(tp->dev);
4030 netif_carrier_off(tp->dev);
4031 tg3_link_report(tp);
4033 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4034 if (orig_pause_cfg != now_pause_cfg ||
4035 orig_active_speed != tp->link_config.active_speed ||
4036 orig_active_duplex != tp->link_config.active_duplex)
4037 tg3_link_report(tp);
4043 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4045 int current_link_up, err = 0;
4049 u32 local_adv, remote_adv;
4051 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4052 tw32_f(MAC_MODE, tp->mac_mode);
4058 (MAC_STATUS_SYNC_CHANGED |
4059 MAC_STATUS_CFG_CHANGED |
4060 MAC_STATUS_MI_COMPLETION |
4061 MAC_STATUS_LNKSTATE_CHANGED));
4067 current_link_up = 0;
4068 current_speed = SPEED_INVALID;
4069 current_duplex = DUPLEX_INVALID;
4071 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4072 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4074 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4075 bmsr |= BMSR_LSTATUS;
4077 bmsr &= ~BMSR_LSTATUS;
4080 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4082 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4083 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4084 /* do nothing, just check for link up at the end */
4085 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4088 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4089 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4090 ADVERTISE_1000XPAUSE |
4091 ADVERTISE_1000XPSE_ASYM |
4094 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4096 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4097 new_adv |= ADVERTISE_1000XHALF;
4098 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4099 new_adv |= ADVERTISE_1000XFULL;
4101 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4102 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4103 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4104 tg3_writephy(tp, MII_BMCR, bmcr);
4106 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4107 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4108 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4115 bmcr &= ~BMCR_SPEED1000;
4116 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4118 if (tp->link_config.duplex == DUPLEX_FULL)
4119 new_bmcr |= BMCR_FULLDPLX;
4121 if (new_bmcr != bmcr) {
4122 /* BMCR_SPEED1000 is a reserved bit that needs
4123 * to be set on write.
4125 new_bmcr |= BMCR_SPEED1000;
4127 /* Force a linkdown */
4128 if (netif_carrier_ok(tp->dev)) {
4131 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4132 adv &= ~(ADVERTISE_1000XFULL |
4133 ADVERTISE_1000XHALF |
4135 tg3_writephy(tp, MII_ADVERTISE, adv);
4136 tg3_writephy(tp, MII_BMCR, bmcr |
4140 netif_carrier_off(tp->dev);
4142 tg3_writephy(tp, MII_BMCR, new_bmcr);
4144 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4145 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4146 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4148 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4149 bmsr |= BMSR_LSTATUS;
4151 bmsr &= ~BMSR_LSTATUS;
4153 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4157 if (bmsr & BMSR_LSTATUS) {
4158 current_speed = SPEED_1000;
4159 current_link_up = 1;
4160 if (bmcr & BMCR_FULLDPLX)
4161 current_duplex = DUPLEX_FULL;
4163 current_duplex = DUPLEX_HALF;
4168 if (bmcr & BMCR_ANENABLE) {
4171 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4172 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4173 common = local_adv & remote_adv;
4174 if (common & (ADVERTISE_1000XHALF |
4175 ADVERTISE_1000XFULL)) {
4176 if (common & ADVERTISE_1000XFULL)
4177 current_duplex = DUPLEX_FULL;
4179 current_duplex = DUPLEX_HALF;
4182 current_link_up = 0;
4186 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4187 tg3_setup_flow_control(tp, local_adv, remote_adv);
4189 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4190 if (tp->link_config.active_duplex == DUPLEX_HALF)
4191 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4193 tw32_f(MAC_MODE, tp->mac_mode);
4196 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4198 tp->link_config.active_speed = current_speed;
4199 tp->link_config.active_duplex = current_duplex;
4201 if (current_link_up != netif_carrier_ok(tp->dev)) {
4202 if (current_link_up)
4203 netif_carrier_on(tp->dev);
4205 netif_carrier_off(tp->dev);
4206 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4208 tg3_link_report(tp);
4213 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4215 if (tp->serdes_counter) {
4216 /* Give autoneg time to complete. */
4217 tp->serdes_counter--;
4220 if (!netif_carrier_ok(tp->dev) &&
4221 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4224 tg3_readphy(tp, MII_BMCR, &bmcr);
4225 if (bmcr & BMCR_ANENABLE) {
4228 /* Select shadow register 0x1f */
4229 tg3_writephy(tp, 0x1c, 0x7c00);
4230 tg3_readphy(tp, 0x1c, &phy1);
4232 /* Select expansion interrupt status register */
4233 tg3_writephy(tp, 0x17, 0x0f01);
4234 tg3_readphy(tp, 0x15, &phy2);
4235 tg3_readphy(tp, 0x15, &phy2);
4237 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4238 /* We have signal detect and not receiving
4239 * config code words, link is up by parallel
4243 bmcr &= ~BMCR_ANENABLE;
4244 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4245 tg3_writephy(tp, MII_BMCR, bmcr);
4246 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4250 else if (netif_carrier_ok(tp->dev) &&
4251 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4252 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4255 /* Select expansion interrupt status register */
4256 tg3_writephy(tp, 0x17, 0x0f01);
4257 tg3_readphy(tp, 0x15, &phy2);
4261 /* Config code words received, turn on autoneg. */
4262 tg3_readphy(tp, MII_BMCR, &bmcr);
4263 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4265 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4271 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4275 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4276 err = tg3_setup_fiber_phy(tp, force_reset);
4277 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4278 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4280 err = tg3_setup_copper_phy(tp, force_reset);
4283 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4284 u32 val, scale;
4286 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4287 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4288 scale = 65;
4289 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4290 scale = 6;
4291 else
4292 scale = 12;
4294 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4295 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4296 tw32(GRC_MISC_CFG, val);
4297 }
4299 if (tp->link_config.active_speed == SPEED_1000 &&
4300 tp->link_config.active_duplex == DUPLEX_HALF)
4301 tw32(MAC_TX_LENGTHS,
4302 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4303 (6 << TX_LENGTHS_IPG_SHIFT) |
4304 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4306 tw32(MAC_TX_LENGTHS,
4307 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4308 (6 << TX_LENGTHS_IPG_SHIFT) |
4309 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4311 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4312 if (netif_carrier_ok(tp->dev)) {
4313 tw32(HOSTCC_STAT_COAL_TICKS,
4314 tp->coal.stats_block_coalesce_usecs);
4316 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4320 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4321 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4322 if (!netif_carrier_ok(tp->dev))
4323 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4326 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4327 tw32(PCIE_PWR_MGMT_THRESH, val);
4333 /* This is called whenever we suspect that the system chipset is re-
4334 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4335 * is bogus tx completions. We try to recover by setting the
4336 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4339 static void tg3_tx_recover(struct tg3 *tp)
4341 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4342 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4344 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4345 "mapped I/O cycles to the network device, attempting to "
4346 "recover. Please report the problem to the driver maintainer "
4347 "and include system chipset information.\n", tp->dev->name);
4349 spin_lock(&tp->lock);
4350 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4351 spin_unlock(&tp->lock);
4352 }
4354 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4355 {
4356 smp_mb();
4357 return tnapi->tx_pending -
4358 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4359 }
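/* Sketch (not driver code): why the computation above survives index
 * wraparound.  With a power-of-two ring, unsigned subtraction masked
 * by (size - 1) always yields the in-flight descriptor count:
 */
static u32 example_ring_in_flight(u32 prod, u32 cons, u32 size)
{
	return (prod - cons) & (size - 1);	/* size must be 2^n */
}
/* e.g. prod == 2 after wrapping, cons == 510, size == 512:
 * (2 - 510) & 511 == 4 descriptors still in flight. */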
4361 /* Tigon3 never reports partial packet sends. So we do not
4362 * need special logic to handle SKBs that have not had all
4363 * of their frags sent yet, like SunGEM does.
4365 static void tg3_tx(struct tg3_napi *tnapi)
4367 struct tg3 *tp = tnapi->tp;
4368 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4369 u32 sw_idx = tnapi->tx_cons;
4370 struct netdev_queue *txq;
4371 int index = tnapi - tp->napi;
4373 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4376 txq = netdev_get_tx_queue(tp->dev, index);
4378 while (sw_idx != hw_idx) {
4379 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4380 struct sk_buff *skb = ri->skb;
4381 int i, tx_bug = 0;
4383 if (unlikely(skb == NULL)) {
4384 tg3_tx_recover(tp);
4385 return;
4386 }
4388 pci_unmap_single(tp->pdev,
4389 pci_unmap_addr(ri, mapping),
4390 skb_headlen(skb),
4391 PCI_DMA_TODEVICE);
4393 ri->skb = NULL;
4395 sw_idx = NEXT_TX(sw_idx);
4397 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4398 ri = &tnapi->tx_buffers[sw_idx];
4399 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4400 tx_bug = 1;
4402 pci_unmap_page(tp->pdev,
4403 pci_unmap_addr(ri, mapping),
4404 skb_shinfo(skb)->frags[i].size,
4405 PCI_DMA_TODEVICE);
4406 sw_idx = NEXT_TX(sw_idx);
4407 }
4409 dev_kfree_skb(skb);
4411 if (unlikely(tx_bug)) {
4412 tg3_tx_recover(tp);
4413 return;
4414 }
4415 }
4417 tnapi->tx_cons = sw_idx;
4419 /* Need to make the tx_cons update visible to tg3_start_xmit()
4420 * before checking for netif_queue_stopped(). Without the
4421 * memory barrier, there is a small possibility that tg3_start_xmit()
4422 * will miss it and cause the queue to be stopped forever.
4426 if (unlikely(netif_tx_queue_stopped(txq) &&
4427 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4428 __netif_tx_lock(txq, smp_processor_id());
4429 if (netif_tx_queue_stopped(txq) &&
4430 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4431 netif_tx_wake_queue(txq);
4432 __netif_tx_unlock(txq);
4433 }
4434 }
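/* Analogy sketch (not driver code; a userspace C11 rendering): the
 * ordering contract behind the smp_mb() discussed above.  All accesses
 * are seq_cst because this is a store->load (Dekker-style) pattern
 * that acquire/release alone cannot order.  Names are illustrative.
 */
#include <stdatomic.h>

static _Atomic unsigned int ex_tx_cons;
static atomic_bool ex_queue_stopped;

/* completion side: publish the new consumer index, then re-check */
static int example_completion_side(unsigned int new_cons)
{
	atomic_store(&ex_tx_cons, new_cons);	/* like tnapi->tx_cons = sw_idx */
	return atomic_load(&ex_queue_stopped);	/* safe to test only after the store */
}

/* transmit side: mark the queue stopped, then re-read the consumer index */
static unsigned int example_transmit_side(void)
{
	atomic_store(&ex_queue_stopped, 1);
	return atomic_load(&ex_tx_cons);
}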
4436 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4437 {
4438 if (!ri->skb)
4439 return;
4441 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4442 map_sz, PCI_DMA_FROMDEVICE);
4443 dev_kfree_skb_any(ri->skb);
4444 ri->skb = NULL;
4445 }
4447 /* Returns size of skb allocated or < 0 on error.
4449 * We only need to fill in the address because the other members
4450 * of the RX descriptor are invariant, see tg3_init_rings.
4452 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4453 * posting buffers we only dirty the first cache line of the RX
4454 * descriptor (containing the address). Whereas for the RX status
4455 * buffers the cpu only reads the last cacheline of the RX descriptor
4456 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4458 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4459 u32 opaque_key, u32 dest_idx_unmasked)
4461 struct tg3_rx_buffer_desc *desc;
4462 struct ring_info *map, *src_map;
4463 struct sk_buff *skb;
4465 int skb_size, dest_idx;
4468 switch (opaque_key) {
4469 case RXD_OPAQUE_RING_STD:
4470 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4471 desc = &tpr->rx_std[dest_idx];
4472 map = &tpr->rx_std_buffers[dest_idx];
4473 skb_size = tp->rx_pkt_map_sz;
4474 break;
4476 case RXD_OPAQUE_RING_JUMBO:
4477 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4478 desc = &tpr->rx_jmb[dest_idx].std;
4479 map = &tpr->rx_jmb_buffers[dest_idx];
4480 skb_size = TG3_RX_JMB_MAP_SZ;
4481 break;
4483 default:
4484 return -EINVAL;
4485 }
4487 /* Do not overwrite any of the map or rp information
4488 * until we are sure we can commit to a new buffer.
4490 * Callers depend upon this behavior and assume that
4491 * we leave everything unchanged if we fail.
4493 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4494 if (skb == NULL)
4495 return -ENOMEM;
4497 skb_reserve(skb, tp->rx_offset);
4499 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4500 PCI_DMA_FROMDEVICE);
4501 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4502 dev_kfree_skb(skb);
4503 return -EIO;
4504 }
4506 map->skb = skb;
4507 pci_unmap_addr_set(map, mapping, mapping);
4509 desc->addr_hi = ((u64)mapping >> 32);
4510 desc->addr_lo = ((u64)mapping & 0xffffffff);
4512 return skb_size;
4513 }
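/* Sketch (not driver code): the cache-line split described in the
 * comment above tg3_alloc_rx_skb(), assuming the 32-byte RX buffer
 * descriptor layout this hardware uses (the real definition lives in
 * tg3.h).  Buffer posting dirties only the leading address words; the
 * chip fills in the status words the host later reads from the status
 * ring copy.
 */
struct example_rx_bd {
	u32 addr_hi, addr_lo;	/* written by host when posting a buffer */
	u32 idx_len;		/* filled by chip: index and packet length */
	u32 type_flags;		/* filled by chip: ring type, error summary */
	u32 ip_tcp_csum;	/* filled by chip: checksum results */
	u32 err_vlan;		/* filled by chip: error bits, vlan tag */
	u32 reserved;
	u32 opaque;		/* echoed back: ring + index cookie */
};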
4515 /* We only need to move over in the address because the other
4516 * members of the RX descriptor are invariant. See notes above
4517 * tg3_alloc_rx_skb for full details.
4519 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4520 struct tg3_rx_prodring_set *dpr,
4521 u32 opaque_key, int src_idx,
4522 u32 dest_idx_unmasked)
4524 struct tg3 *tp = tnapi->tp;
4525 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4526 struct ring_info *src_map, *dest_map;
4528 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4530 switch (opaque_key) {
4531 case RXD_OPAQUE_RING_STD:
4532 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4533 dest_desc = &dpr->rx_std[dest_idx];
4534 dest_map = &dpr->rx_std_buffers[dest_idx];
4535 src_desc = &spr->rx_std[src_idx];
4536 src_map = &spr->rx_std_buffers[src_idx];
4537 break;
4539 case RXD_OPAQUE_RING_JUMBO:
4540 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4541 dest_desc = &dpr->rx_jmb[dest_idx].std;
4542 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4543 src_desc = &spr->rx_jmb[src_idx].std;
4544 src_map = &spr->rx_jmb_buffers[src_idx];
4545 break;
4547 default:
4548 return;
4549 }
4551 dest_map->skb = src_map->skb;
4552 pci_unmap_addr_set(dest_map, mapping,
4553 pci_unmap_addr(src_map, mapping));
4554 dest_desc->addr_hi = src_desc->addr_hi;
4555 dest_desc->addr_lo = src_desc->addr_lo;
4557 /* Ensure that the update to the skb happens after the physical
4558 * addresses have been transferred to the new BD location.
4562 src_map->skb = NULL;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * fields, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, the chip walks down the TG3_BDINFO entries to select the
 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
 *
 * The "separate ring for RX status" scheme may seem odd, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the RX status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
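/* Illustrative sketch (an addition, not from the original source; the
 * buffer sizes are assumptions based on the usual standard/jumbo split):
 * under the MAXLEN scheme described above, a 300-byte frame is matched by
 * the first TG3_BDINFO whose MAXLEN covers it (the standard ring), while
 * a 5000-byte frame falls through to the jumbo ring.  Host-written
 * producer rings flow toward the chip; the chip-written return (status)
 * ring flows back to the host and is drained by tg3_rx() below.
 */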
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
4608 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4609 while (sw_idx != hw_idx && budget > 0) {
4610 struct ring_info *ri;
4611 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4613 struct sk_buff *skb;
4614 dma_addr_t dma_addr;
4615 u32 opaque_key, desc_idx, *post_ptr;
4617 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4618 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;
4633 work_mask |= opaque_key;
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;
		if (len > RX_COPY_THRESHOLD &&
		    tp->rx_offset == NET_IP_ALIGN) {
			/* rx_offset will likely not equal NET_IP_ALIGN
			 * if this is a 5701 card running in PCI-X mode
			 * [see tg3_get_invariants()]
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;
4671 tg3_recycle_rx(tnapi, tpr, opaque_key,
4672 desc_idx, *post_ptr);
4674 copy_skb = netdev_alloc_skb(tp->dev,
4675 len + TG3_RAW_IP_ALIGN);
4676 if (copy_skb == NULL)
4677 goto drop_it_no_recycle;
4679 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4680 skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);
			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}
4689 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4690 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4691 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4692 >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
4697 skb->protocol = eth_type_trans(skb, tp->dev);
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}
4705 #if TG3_VLAN_TAG_USED
4706 if (tp->vlgrp != NULL &&
4707 desc->type_flags & RXD_FLAG_VLAN) {
4708 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
					 desc->err_vlan & RXD_VLAN_MASK, skb);
		} else
#endif
			napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
4720 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4721 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4722 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4723 tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}
4738 /* ACK the status ring. */
4739 tnapi->rx_rcb_ptr = sw_idx;
4740 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4742 /* Refill RX ring(s). */
4743 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4744 if (work_mask & RXD_OPAQUE_RING_STD) {
4745 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4746 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4747 tpr->rx_std_prod_idx);
4749 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4750 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4751 TG3_RX_JUMBO_RING_SIZE;
4752 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4753 tpr->rx_jmb_prod_idx);
4756 } else if (work_mask) {
4757 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4765 if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
4772 static void tg3_poll_link(struct tg3 *tp)
4774 /* handle link change and other phy events */
4775 if (!(tp->tg3_flags &
4776 (TG3_FLAG_USE_LINKCHG_REG |
4777 TG3_FLAG_POLL_SERDES))) {
4778 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4780 if (sblk->status & SD_STATUS_LINK_CHG) {
4781 sblk->status = SD_STATUS_UPDATED |
4782 (sblk->status & ~SD_STATUS_LINK_CHG);
4783 spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
4798 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4799 struct tg3_rx_prodring_set *dpr,
4800 struct tg3_rx_prodring_set *spr)
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
4806 src_prod_idx = spr->rx_std_prod_idx;
4808 /* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4821 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4823 si = spr->rx_std_cons_idx;
4824 di = dpr->rx_std_prod_idx;
4826 for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;
4837 /* Ensure that updates to the rx_std_buffers ring and the
4838 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();
4843 memcpy(&dpr->rx_std_buffers[di],
4844 &spr->rx_std_buffers[si],
4845 cpycnt * sizeof(struct ring_info));
4847 for (i = 0; i < cpycnt; i++, di++, si++) {
4848 struct tg3_rx_buffer_desc *sbd, *dbd;
4849 sbd = &spr->rx_std[si];
4850 dbd = &dpr->rx_std[di];
4851 dbd->addr_hi = sbd->addr_hi;
4852 dbd->addr_lo = sbd->addr_lo;
		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	while (1) {
4862 src_prod_idx = spr->rx_jmb_prod_idx;
4864 /* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4877 cpycnt = min(cpycnt,
4878 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4880 si = spr->rx_jmb_cons_idx;
4881 di = dpr->rx_jmb_prod_idx;
4883 for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;
4894 /* Ensure that updates to the rx_jmb_buffers ring and the
4895 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();
4900 memcpy(&dpr->rx_jmb_buffers[di],
4901 &spr->rx_jmb_buffers[si],
4902 cpycnt * sizeof(struct ring_info));
4904 for (i = 0; i < cpycnt; i++, di++, si++) {
4905 struct tg3_rx_buffer_desc *sbd, *dbd;
4906 sbd = &spr->rx_jmb[si].std;
4907 dbd = &dpr->rx_jmb[di].std;
4908 dbd->addr_hi = sbd->addr_hi;
4909 dbd->addr_lo = sbd->addr_lo;
4912 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4913 TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}

	return err;
}
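/* Worked example (an illustrative addition, not from the original source):
 * with TG3_RX_RING_SIZE == 512, a source consumer index of 500 and a
 * source producer index of 10, the producer has wrapped, so the first
 * pass above copies 512 - 500 = 12 entries and a later pass picks up the
 * remaining 10; the modulo arithmetic on the indices keeps both rings
 * consistent across the wrap.
 */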
4921 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4923 struct tg3 *tp = tnapi->tp;
4925 /* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}
4932 /* run RX thread, within the bounds set by NAPI.
4933 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
4936 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4937 work_done += tg3_rx(tnapi, budget - work_done);
4939 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
		int i, err = 0;
4942 u32 std_prod_idx = dpr->rx_std_prod_idx;
4943 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4945 for (i = 1; i < tp->irq_cnt; i++)
4946 err |= tg3_rx_prodring_xfer(tp, dpr,
4947 tp->napi[i].prodring);
4951 if (std_prod_idx != dpr->rx_std_prod_idx)
4952 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4953 dpr->rx_std_prod_idx);
4955 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4956 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4957 dpr->rx_jmb_prod_idx);
		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
4968 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4970 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
4976 work_done = tg3_poll_work(tnapi, work_done, budget);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;
4984 /* tp->last_tag is used in tg3_restart_ints() below
4985 * to tell the hw how much work has been processed,
4986 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();
4992 /* check for RX/TX work to do */
4993 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4994 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4995 napi_complete(napi);
4996 /* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
5006 /* work_done is guaranteed to be less than budget. */
5007 napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5012 static int tg3_poll(struct napi_struct *napi, int budget)
5014 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;
5030 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5031 /* tp->last_tag is used in tg3_int_reenable() below
5032 * to tell the hw how much work has been processed,
5033 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;
5041 if (likely(!tg3_has_work(tnapi))) {
5042 napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
5051 /* work_done is guaranteed to be less than budget. */
5052 napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
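/* Sketch of the NAPI contract both poll routines above follow (an
 * illustrative summary, not driver code): process at most 'budget'
 * packets per invocation; if the budget is exhausted, return without
 * calling napi_complete() so the core polls again; only once all TX/RX
 * work is drained call napi_complete() and re-enable chip interrupts.
 */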
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
 * as well.  Most of the time this is not necessary, except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
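/* Typical usage of the locking helpers above (an illustrative sketch,
 * not a verbatim excerpt from this driver):
 *
 *	tg3_full_lock(tp, 1);	   non-zero irq_sync also quiesces IRQs
 *	...reconfigure or halt the hardware...
 *	tg3_full_unlock(tp);
 *
 * The irq_sync argument should be non-zero only on paths that must not
 * race with the interrupt handlers, e.g. device shutdown.
 */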
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
5095 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5097 struct tg3_napi *tnapi = dev_id;
5098 struct tg3 *tp = tnapi->tp;
5100 prefetch(tnapi->hw_status);
5102 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
5114 static irqreturn_t tg3_msi(int irq, void *dev_id)
5116 struct tg3_napi *tnapi = dev_id;
5117 struct tg3 *tp = tnapi->tp;
5119 prefetch(tnapi->hw_status);
5121 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5130 if (likely(!tg3_irq_sync(tp)))
5131 napi_schedule(&tnapi->napi);
	return IRQ_RETVAL(1);
}
5136 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5138 struct tg3_napi *tnapi = dev_id;
5139 struct tg3 *tp = tnapi->tp;
5140 struct tg3_hw_status *sblk = tnapi->hw_status;
5141 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * has reached memory.  Reading the PCI State register will
	 * confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
5148 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5149 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
5167 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
5171 if (likely(tg3_has_work(tnapi))) {
5172 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5173 napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write.
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5185 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5187 struct tg3_napi *tnapi = dev_id;
5188 struct tg3 *tp = tnapi->tp;
5189 struct tg3_hw_status *sblk = tnapi->hw_status;
5190 unsigned int handled = 1;
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * has reached memory.  Reading the PCI State register will
	 * confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
5197 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5198 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
5216 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;
	if (tg3_irq_sync(tp))
		goto out;
5229 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5237 /* ISR for interrupt test */
5238 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5240 struct tg3_napi *tnapi = dev_id;
5241 struct tg3 *tp = tnapi->tp;
5242 struct tg3_hw_status *sblk = tnapi->hw_status;
5244 if ((sblk->status & SD_STATUS_UPDATED) ||
5245 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5246 tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
5252 static int tg3_init_hw(struct tg3 *, int);
5253 static int tg3_halt(struct tg3 *, int, int);
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5267 "aborting.\n", tp->dev->name);
5268 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5269 tg3_full_unlock(tp);
5270 del_timer_sync(&tp->timer);
5272 tg3_napi_enable(tp);
		tg3_full_lock(tp, 0);
	}

	return err;
}
5279 #ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, dev);
}
#endif
5290 static void tg3_reset_task(struct work_struct *work)
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;
5296 tg3_full_lock(tp, 0);
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);
5309 tg3_full_lock(tp, 1);
5311 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5312 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5314 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5315 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5316 tp->write32_rx_mbox = tg3_write_flush_reg32;
5317 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}
5321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
5338 static void tg3_dump_short_state(struct tg3 *tp)
5340 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5341 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5342 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5343 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5346 static void tg3_tx_timeout(struct net_device *dev)
5348 struct tg3 *tp = netdev_priv(dev);
5350 if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
5359 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5360 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5362 u32 base = (u32) mapping & 0xffffffff;
5364 return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
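/* A long-hand version of the same predicate (an illustrative sketch, not
 * part of the driver; tg3_4g_crossing_example() is a hypothetical name).
 * A transfer breaks the hardware's rule when its first and last bytes
 * fall in different 4GB-aligned windows; the production test above is
 * the same idea expressed with 32-bit wraparound plus a cheap precheck.
 */
static inline int tg3_4g_crossing_example(u64 start, int len)
{
	u64 end = start + len + 8;	/* same 8 bytes of slack as above */

	/* Compare the 4GB "window number" of the first and last byte. */
	return (start >> 32) != (end >> 32);
}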
5368 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5373 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_BIT_MASK(40));
	return 0;
#else
	return 0;
#endif
}
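/* Worked example (an illustrative addition, not from the original source):
 * with the 40-bit DMA bug flag set, a mapping at 0xFF_FFFF_FF00 with
 * len 0x200 ends at 0x100_0000_0100, which exceeds DMA_BIT_MASK(40), so
 * the test above reports an overflow and the workaround path is taken.
 */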
5381 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
/* Work around 4GB and 40-bit hardware DMA bugs. */
5384 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5385 struct sk_buff *skb, u32 last_plus_one,
5386 u32 *start, u32 base_flags, u32 mss)
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			   tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}
	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
			tnapi->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
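/* Worked example (an illustrative addition, not from the original source):
 * a single 1514-byte frame queued as one descriptor with no VLAN tag and
 * no TSO is passed mss_and_is_end == 1 (mss 0, is_end 1), so the code
 * above sets TXD_FLAG_END, leaves vlan_tag at 0, and stores
 * (1514 << TXD_LEN_SHIFT) | flags into len_flags.
 */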
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
 */
5490 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5491 struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	dma_addr_t mapping;
5496 struct tg3_napi *tnapi;
5497 struct netdev_queue *txq;
5498 unsigned int i, last;
5501 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5502 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;
5506 /* We are running in BH disabled context with netif_tx_lock
5507 * and TX reclaim runs via tp->napi.poll inside of a software
5508 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
5511 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5512 if (!netif_tx_queue_stopped(txq)) {
5513 netif_tx_stop_queue(txq);
5515 /* This is a hard error, log it. */
5516 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5517 "queue awake!\n", dev->name);
5519 return NETDEV_TX_BUSY;
5522 entry = tnapi->tx_prod;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;
		u32 hdrlen;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdrlen = skb_headlen(skb) - ETH_HLEN;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			hdrlen = ip_tcp_len + tcp_opt_len;
		}
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;
5556 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5557 TXD_FLAG_CPU_POST_DMA);
		tcp_hdr(skb)->check = 0;
	}
5562 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5563 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5564 #if TG3_VLAN_TAG_USED
5565 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5566 base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif
5570 len = skb_headlen(skb);
5572 /* Queue skb data, a.k.a. the main skb fragment. */
5573 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}
5579 tnapi->tx_buffers[entry].skb = skb;
5580 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5582 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5583 !mss && skb->len > ETH_DATA_LEN)
5584 base_flags |= TXD_FLAG_JMB_PKT;
5586 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5587 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5589 entry = NEXT_TX(entry);
5591 /* Now loop through additional data fragments, and queue them. */
5592 if (skb_shinfo(skb)->nr_frags > 0) {
5593 last = skb_shinfo(skb)->nr_frags - 1;
5594 for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;
5605 tnapi->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
5609 tg3_set_txd(tnapi, entry, mapping, len,
5610 base_flags, (i == last) | (mss << 1));
			entry = NEXT_TX(entry);
		}
	}
5616 /* Packets are ready, update Tx producer idx local and on card. */
5617 tw32_tx_mbox(tnapi->prodmbox, entry);
5619 tnapi->tx_prod = entry;
5620 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5621 netif_tx_stop_queue(txq);
5622 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;
dma_error:
	last = i;
	entry = tnapi->tx_prod;
5634 tnapi->tx_buffers[entry].skb = NULL;
5635 pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
5639 for (i = 0; i <= last; i++) {
5640 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5641 entry = NEXT_TX(entry);
5643 pci_unmap_page(tp->pdev,
			       pci_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
5653 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5654 struct net_device *);
/* Use GSO to work around a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
5659 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5661 struct sk_buff *segs, *nskb;
5662 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5664 /* Estimate the number of fragments in the worst case */
5665 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5666 netif_stop_queue(tp->dev);
5667 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5668 return NETDEV_TX_BUSY;
		netif_wake_queue(tp->dev);
	}
5673 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
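/* Note on the estimate above (an illustrative observation, not from the
 * original source): frag_cnt_est allows a worst case of roughly three
 * descriptors per GSO segment (headers plus fragmented payload), so the
 * queue is stopped early enough that every segment produced by
 * skb_gso_segment() can still be handed to tg3_start_xmit_dma_bug().
 */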
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
5693 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5694 struct net_device *dev)
5696 struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	dma_addr_t mapping;
5700 struct tg3_napi *tnapi;
5701 struct netdev_queue *txq;
5702 unsigned int i, last;
5705 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5706 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		tnapi++;
5710 /* We are running in BH disabled context with netif_tx_lock
5711 * and TX reclaim runs via tp->napi.poll inside of a software
5712 * interrupt. Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
5715 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5716 if (!netif_tx_queue_stopped(txq)) {
5717 netif_tx_stop_queue(txq);
5719 /* This is a hard error, log it. */
5720 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5721 "queue awake!\n", dev->name);
5723 return NETDEV_TX_BUSY;
5726 entry = tnapi->tx_prod;
5728 if (skb->ip_summed == CHECKSUM_PARTIAL)
5729 base_flags |= TXD_FLAG_TCPUDP_CSUM;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		u32 tcp_opt_len, ip_tcp_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}
5741 tcp_opt_len = tcp_optlen(skb);
5742 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5744 hdr_len = ip_tcp_len + tcp_opt_len;
5745 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5746 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5747 return (tg3_tso_bug(tp, skb));
5749 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5750 TXD_FLAG_CPU_POST_DMA);
		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
5755 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {